# coding=utf-8
"""
Collects data from RabbitMQ through the admin interface
#### Notes

 * With added support for breaking down queue metrics by vhost, we have
   attempted to keep results generated by existing configurations from
   changing. This means that the old behaviour of clobbering queue metrics
   when a single queue name exists in multiple vhosts still exists if the
   configuration is not updated. If no vhosts block is defined it will also
   keep the metric path as it was historically, with no vhost name in it.

     old path => systems.myServer.rabbitmq.queues.myQueue.*
     new path => systems.myServer.rabbitmq.vhosts.myVhost.queues.myQueue.*

 * If a [vhosts] section exists but is empty, then no queues will be polled.
 * To poll all vhosts and all queues, add the following:

     [vhosts]
     * = *

"""
import diamond.collector
import re
from urlparse import urljoin
from urllib import quote
import urllib2
from base64 import b64encode
try:
import json
except ImportError:
import simplejson as json
class RabbitMQClient(object):
"""
Tiny interface into the rabbit http api
"""
def __init__(self, log, host, user, password, timeout=5, scheme="http"):
self.log = log
self.base_url = '%s://%s/api/' % (scheme, host)
self.timeout = timeout
self._authorization = 'Basic ' + b64encode('%s:%s' % (user, password))
def do_call(self, path):
url = urljoin(self.base_url, path)
req = urllib2.Request(url)
req.add_header('Authorization', self._authorization)
return json.load(urllib2.urlopen(req, timeout=self.timeout))
def get_all_vhosts(self):
return self.do_call('vhosts')
def get_vhost_names(self):
return [i['name'] for i in self.get_all_vhosts()]
def get_queue(self, vhost, queue_name):
path = 'queues'
if vhost:
vhost = quote(vhost, '')
queue_name = quote(queue_name, '')
path += '/%s/%s' % (vhost, queue_name)
try:
queue = self.do_call(path)
return queue or None
except Exception as e:
self.log.error('Error querying queue %s/%s: %s' % (
vhost, queue_name, e
))
return None
def get_queues(self, vhost):
path = 'queues'
vhost = quote(vhost, '')
path += '/%s' % vhost
try:
queues = self.do_call(path)
return queues or []
except Exception as e:
self.log.error('Error querying queues %s: %s' % (
vhost, e
))
return []
def get_overview(self):
return self.do_call('overview')
def get_nodes(self):
return self.do_call('nodes')
def get_node(self, node):
return self.do_call('nodes/%s' % node)
class RabbitMQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(RabbitMQCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname and port to collect from',
'user': 'Username',
'password': 'Password',
'replace_dot':
'A value to replace dot in queue names and vhosts names by',
'replace_slash':
'A value to replace a slash in queue names and vhosts names by',
'queues': 'Queues to publish. Leave empty to publish all.',
'vhosts':
'A list of vhosts and queues for which we want to collect',
'queues_ignored':
'A list of queues or regexes for queue names not to report on.',
'cluster':
                'If this node is part of a cluster, also collect metrics'
                ' on cluster health',
'query_individual_queues':
'If specific queues are set, query their metrics individually.'
' When this is False, queue metrics will be queried in bulk and'
' filtered, which can time out for vhosts with many queues.'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(RabbitMQCollector, self).get_default_config()
config.update({
'path': 'rabbitmq',
'host': 'localhost:55672',
'user': 'guest',
'password': 'guest',
'replace_dot': False,
'replace_slash': False,
'queues_ignored': '',
'cluster': False,
'scheme': 'http',
'query_individual_queues': False,
})
return config
def collect_health(self):
health_metrics = [
'fd_used',
'fd_total',
'mem_used',
'mem_limit',
'sockets_used',
'sockets_total',
'disk_free_limit',
'disk_free',
'proc_used',
'proc_total',
]
try:
client = RabbitMQClient(self.log,
self.config['host'],
self.config['user'],
self.config['password'],
scheme=self.config['scheme'])
node_name = client.get_overview()['node']
node_data = client.get_node(node_name)
for metric in health_metrics:
self.publish('health.{}'.format(metric), node_data[metric])
if self.config['cluster']:
self.publish('cluster.partitions',
len(node_data['partitions']))
content = client.get_nodes()
self.publish('cluster.nodes', len(content))
        except Exception:
self.log.exception('Could not connect to rabbitmq')
return {}
def get_queue_metrics(self, client, vhost, queues):
        # Allow the use of an asterisk to glob the queues, but replace
        # it with an empty string to match the legacy config behaviour.
if queues == "*":
queues = ""
allowed_queues = queues.split()
matchers = []
if self.config['queues_ignored']:
for reg in self.config['queues_ignored'].split():
matchers.append(re.compile(reg))
if len(allowed_queues) and self.config['query_individual_queues']:
for queue_name in allowed_queues:
if matchers and any(
[m.match(queue_name) for m in matchers]):
continue
queue = client.get_queue(vhost, queue_name)
if queue is not None:
yield queue
else:
for queue in client.get_queues(vhost):
# If queues are defined and it doesn't match, then skip.
if ((queue['name'] not in allowed_queues and
len(allowed_queues) > 0)):
continue
if matchers and any(
[m.match(queue['name']) for m in matchers]):
continue
yield queue
def get_vhost_conf(self, vhost_names):
legacy = False
if 'vhosts' in self.config:
vhost_conf = self.config['vhosts']
else:
            # Legacy configurations (those that don't include the [vhosts]
            # section) require special care so that we do not break metric
            # gathering for people who were using this collector before
            # the update to support vhosts.
legacy = True
if 'queues' in self.config:
vhost_conf = {"*": self.config['queues']}
else:
vhost_conf = {"*": ""}
if "*" in vhost_conf:
for vhost in vhost_names:
# Copy the glob queue list to each vhost not
# specifically defined in the configuration.
if vhost not in vhost_conf:
vhost_conf[vhost] = vhost_conf['*']
del vhost_conf["*"]
return vhost_conf, legacy
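    # Example of get_vhost_conf() behaviour (hypothetical values): given
    # vhost_names=['/', 'myVhost'] and a configured [vhosts] section of
    # {'*': 'queue_a queue_b'}, it returns
    # ({'/': 'queue_a queue_b', 'myVhost': 'queue_a queue_b'}, False).
    # With no [vhosts] section and queues='queue_a' it returns
    # ({'/': 'queue_a', 'myVhost': 'queue_a'}, True), so the legacy metric
    # path without a vhost component is preserved.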
def collect(self):
self.collect_health()
try:
client = RabbitMQClient(self.log,
self.config['host'],
self.config['user'],
self.config['password'],
scheme=self.config['scheme'])
vhost_names = client.get_vhost_names()
vhost_conf, legacy = self.get_vhost_conf(vhost_names)
# Iterate all vhosts in our vhosts configurations
for vhost, queues in vhost_conf.iteritems():
vhost_name = vhost
if self.config['replace_dot']:
vhost_name = vhost_name.replace(
'.', self.config['replace_dot'])
if self.config['replace_slash']:
vhost_name = vhost_name.replace(
'/', self.config['replace_slash'])
for queue in self.get_queue_metrics(
client, vhost, queues
):
for key in queue:
prefix = "queues"
if not legacy:
prefix = "vhosts.%s.%s" % (vhost_name, "queues")
queue_name = queue['name']
if self.config['replace_dot']:
queue_name = queue_name.replace(
'.', self.config['replace_dot'])
if self.config['replace_slash']:
queue_name = queue_name.replace(
'/', self.config['replace_slash'])
name = '{}.{}'.format(prefix, queue_name)
self._publish_metrics(name, [], key, queue)
overview = client.get_overview()
for key in overview:
self._publish_metrics('', [], key, overview)
        except Exception:
self.log.exception('An error occurred collecting from RabbitMQ')
return {}
def _publish_metrics(self, name, prev_keys, key, data):
"""Recursively publish keys"""
value = data[key]
keys = prev_keys + [key]
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(name, keys, new_key, value)
elif isinstance(value, (float, int, long)):
joined_keys = '.'.join(keys)
if name:
publish_key = '{}.{}'.format(name, joined_keys)
else:
publish_key = joined_keys
if isinstance(value, bool):
value = int(value)
self.publish(publish_key, value)
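# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the collector): a minimal standalone
# example, assuming a RabbitMQ management API is reachable on localhost:15672
# with the default guest/guest credentials (adjust for a real broker). It only
# exercises RabbitMQClient; publishing metrics still requires Diamond itself.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    example_log = logging.getLogger('rabbitmq-collector-example')
    example_client = RabbitMQClient(example_log, 'localhost:15672',
                                    'guest', 'guest')
    try:
        overview = example_client.get_overview()
        example_log.info('connected to node %s', overview.get('node'))
        for vhost in example_client.get_vhost_names():
            for queue in example_client.get_queues(vhost):
                example_log.info('%s/%s messages=%s', vhost,
                                 queue.get('name'), queue.get('messages'))
    except Exception:
        example_log.exception('example query against the management API '
                              'failed')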
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service."""
import oslo_messaging as messaging
from oslo_utils import excutils
from nova import availability_zones
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
check_instance_state = compute_api.check_instance_state
reject_instance_state = compute_api.reject_instance_state
check_instance_lock = compute_api.check_instance_lock
check_instance_cell = compute_api.check_instance_cell
class ComputeRPCAPIRedirect(object):
    # NOTE(comstud): This is a list of methods where the cells_rpcapi
    # and the compute_rpcapi methods have the same signatures. This
    # is for transitioning to a common interface where we can just
    # swap out the compute_rpcapi class with the cells_rpcapi class.
cells_compatible = ['start_instance', 'stop_instance',
'reboot_instance', 'suspend_instance',
'resume_instance', 'terminate_instance',
'soft_delete_instance', 'pause_instance',
'unpause_instance', 'revert_resize',
'confirm_resize', 'reset_network',
'inject_network_info',
'backup_instance', 'snapshot_instance',
'set_admin_password']
def __init__(self, cells_rpcapi):
self.cells_rpcapi = cells_rpcapi
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
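# Example of the redirect behaviour (hypothetical call sites): on an instance
# of ComputeRPCAPIRedirect, redirect.start_instance(ctxt, instance) is
# forwarded to cells_rpcapi.start_instance(), while an attribute not listed in
# cells_compatible, e.g. redirect.get_host_uptime(ctxt, 'somehost'), resolves
# to _noop_rpc_wrapper and silently returns None.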
class ConductorTaskRPCAPIRedirect(object):
    # NOTE(comstud): This is a list of methods where the cells_rpcapi
    # and the compute_task_rpcapi methods have the same signatures. This
    # is for transitioning to a common interface where we can just
    # swap out the compute_task_rpcapi class with the cells_rpcapi class.
cells_compatible = ['build_instances', 'resize_instance',
'live_migrate_instance', 'rebuild_instance']
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class RPCClientCellsProxy(object):
def __init__(self, target, version_cap):
super(RPCClientCellsProxy, self).__init__()
self.target = target
self.version_cap = version_cap
self._server = None
self._version = None
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def prepare(self, **kwargs):
ret = type(self)(self.target, self.version_cap)
ret.cells_rpcapi = self.cells_rpcapi
server = kwargs.pop('server', None)
version = kwargs.pop('version', None)
if kwargs:
raise ValueError("Unsupported kwargs: %s" % kwargs.keys())
if server:
ret._server = server
if version:
ret._version = version
return ret
def _check_version_cap(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
if not client.can_send_version(version):
raise messaging.RPCVersionCapError(version=version,
version_cap=self.version_cap)
def _make_msg(self, method, **kwargs):
version = self._version if self._version else self.target.version
self._check_version_cap(version)
return {
'method': method,
'namespace': None,
'version': version,
'args': kwargs
}
def _get_topic(self):
if self._server is not None:
return '%s.%s' % (self.target.topic, self._server)
else:
return self.target.topic
def can_send_version(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
return client.can_send_version(version)
def cast(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
def call(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg,
topic, call=True)
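# Example of the proxied message shape (values are illustrative only):
# _make_msg('get_host_uptime', host='compute1') on a proxy prepared with
# version='4.0' produces {'method': 'get_host_uptime', 'namespace': None,
# 'version': '4.0', 'args': {'host': 'compute1'}}; cast()/call() then hand it
# to cells_rpcapi.proxy_rpc_to_manager() with a topic such as
# 'compute.childhost' when a server was set via prepare(server=...).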
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
"""Class used to substitute Compute RPC API that will proxy
via the cells manager to a compute manager in a child cell.
"""
def get_client(self, target, version_cap, serializer):
return RPCClientCellsProxy(target, version_cap)
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
# Avoid casts/calls directly to compute
self.compute_rpcapi = ComputeRPCAPIRedirect(self.cells_rpcapi)
# Redirect conductor build_instances to cells
self.compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
self._cell_type = 'api'
def _cast_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance.uuid
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
self.cells_rpcapi.cast_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _call_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance.uuid
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
return self.cells_rpcapi.call_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
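    # For example (hypothetical): _cast_to_cells(ctxt, instance, 'unrescue')
    # asks the cells RPC API to invoke the compute API's unrescue() in
    # whichever child cell owns instance.cell_name (fire-and-forget), while
    # _call_to_cells() does the same but waits for and returns the result.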
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Override compute API's checking of this. It'll happen in
child cell
"""
return max_count
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
for completeness.
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
def _create_block_device_mapping(self, *args, **kwargs):
"""Don't create block device mappings in the API cell.
The child cell will create it and propagate it up to the parent cell.
"""
pass
def force_delete(self, context, instance):
self._handle_cell_delete(context, instance, 'force_delete')
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance, 'soft_delete')
def delete(self, context, instance):
self._handle_cell_delete(context, instance, 'delete')
def _handle_cell_delete(self, context, instance, method_name):
if not instance.cell_name:
            delete_type = 'soft' if method_name == 'soft_delete' else 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance, delete_type)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# NOTE(danms): If we try to delete an instance with no cell,
# there isn't anything to salvage, so we can hard-delete here.
try:
if self._delete_while_booting(context, instance):
return
except exception.ObjectActionError:
# NOTE(alaski): We very likely got here because the host
# constraint in instance.destroy() failed. This likely means
# that an update came up from a child cell and cell_name is
# set now. We handle this similarly to how the
# ObjectActionError is handled below.
with excutils.save_and_reraise_exception() as exc:
_cell, instance = self._lookup_instance(context,
instance.uuid)
if instance is None:
exc.reraise = False
elif instance.cell_name:
exc.reraise = False
self._handle_cell_delete(context, instance,
method_name)
return
# If instance.cell_name was not set it's possible that the Instance
# object here was pulled from a BuildRequest object and is not
# fully populated. Notably it will be missing an 'id' field which
# will prevent instance.destroy from functioning properly. A
# lookup is attempted which will either return a full Instance or
# None if not found. If not found then it's acceptable to skip the
# rest of the delete processing.
_cell, instance = self._lookup_instance(context, instance.uuid)
if instance is None:
# Instance has been deleted out from under us
return
try:
super(ComputeCellsAPI, self)._local_delete(context, instance,
bdms, method_name,
self._do_delete)
except exception.ObjectActionError:
# NOTE(alaski): We very likely got here because the host
# constraint in instance.destroy() failed. This likely means
# that an update came up from a child cell and cell_name is
# set now. If so try the delete again.
with excutils.save_and_reraise_exception() as exc:
try:
instance.refresh()
except exception.InstanceNotFound:
# NOTE(melwitt): If the instance has already been
# deleted by instance_destroy_at_top from a cell,
# instance.refresh() will raise InstanceNotFound.
exc.reraise = False
else:
if instance.cell_name:
exc.reraise = False
self._handle_cell_delete(context, instance,
method_name)
except exception.InstanceNotFound:
# NOTE(melwitt): We can get here if anything tries to
# lookup the instance after an instance_destroy_at_top hits.
pass
return
method = getattr(super(ComputeCellsAPI, self), method_name)
method(context, instance)
@check_instance_cell
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).restore(context, instance)
self._cast_to_cells(context, instance, 'restore')
@check_instance_cell
def evacuate(self, context, instance, host, *args, **kwargs):
"""Evacuate the given instance with the provided attributes."""
if host:
cell_path, host = cells_utils.split_cell_and_item(host)
self._cast_to_cells(context, instance, 'evacuate',
host, *args, **kwargs)
@check_instance_cell
def add_fixed_ip(self, context, instance, *args, **kwargs):
"""Add fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'add_fixed_ip',
*args, **kwargs)
@check_instance_cell
def remove_fixed_ip(self, context, instance, *args, **kwargs):
"""Remove fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'remove_fixed_ip',
*args, **kwargs)
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_diagnostics(context, instance)
return self._call_to_cells(context, instance, 'get_diagnostics')
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_instance_diagnostics(context,
instance)
return self._call_to_cells(context, instance,
'get_instance_diagnostics')
@check_instance_cell
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None, clean_shutdown=True):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
@check_instance_cell
def unrescue(self, context, instance):
"""Unrescue the given instance."""
super(ComputeCellsAPI, self).unrescue(context, instance)
self._cast_to_cells(context, instance, 'unrescue')
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve the given instance."""
self._cast_to_cells(context, instance, 'shelve',
clean_shutdown=clean_shutdown)
@check_instance_cell
def shelve_offload(self, context, instance, clean_shutdown=True):
"""Offload the shelved instance."""
super(ComputeCellsAPI, self).shelve_offload(context, instance,
clean_shutdown=clean_shutdown)
self._cast_to_cells(context, instance, 'shelve_offload',
clean_shutdown=clean_shutdown)
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Unshelve the given instance."""
self._cast_to_cells(context, instance, 'unshelve')
@check_instance_cell
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_vnc_console(self, context, instance, console_type):
"""Get a url to a VNC Console."""
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
connect_info = self._call_to_cells(context, instance,
'get_vnc_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance.uuid, access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_cell
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_spice_console(self, context, instance, console_type):
"""Get a url to a SPICE Console."""
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
connect_info = self._call_to_cells(context, instance,
'get_spice_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance.uuid, access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_cell
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_rdp_console(self, context, instance, console_type):
"""Get a url to a RDP Console."""
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
connect_info = self._call_to_cells(context, instance,
'get_rdp_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance.uuid, access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_cell
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
connect_info = self._call_to_cells(context, instance,
'get_serial_console_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance.uuid, access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)
return self._call_to_cells(context, instance, 'get_console_output',
*args, **kwargs)
@check_instance_cell
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance."""
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_availability_zone(context, volume,
instance=instance)
return self._call_to_cells(context, instance, 'attach_volume',
volume_id, device, disk_bus, device_type)
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
self.volume_api.check_detach(context, volume, instance=instance)
self._cast_to_cells(context, instance, 'detach_volume',
volume)
@check_instance_cell
def associate_floating_ip(self, context, instance, address):
"""Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address
"""
self._cast_to_cells(context, instance, 'associate_floating_ip',
address)
@check_instance_cell
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
super(ComputeCellsAPI, self).delete_instance_metadata(context,
instance, key)
self._cast_to_cells(context, instance, 'delete_instance_metadata',
key)
@check_instance_cell
def update_instance_metadata(self, context, instance,
metadata, delete=False):
rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
instance, metadata, delete=delete)
try:
self._cast_to_cells(context, instance,
'update_instance_metadata',
metadata, delete=delete)
except exception.InstanceUnknownCell:
pass
return rv
def get_migrations(self, context, filters):
return self.cells_rpcapi.get_migrations(context, filters)
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells will
need to be of the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def set_host_enabled(self, context, host_name, enabled):
try:
result = super(HostAPI, self).set_host_enabled(context, host_name,
enabled)
except exception.CellRoutingInconsistency:
raise exception.HostNotFound(host=host_name)
return result
def host_power_action(self, context, host_name, action):
try:
result = super(HostAPI, self).host_power_action(context, host_name,
action)
except exception.CellRoutingInconsistency:
raise exception.HostNotFound(host=host_name)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
return self.cells_rpcapi.get_host_uptime(context, host_name)
def service_get_all(self, context, filters=None, set_zones=False,
all_cells=False):
"""Get all services.
Note that this is the cellsv1 variant, which means we ignore the
"all_cells" parameter.
"""
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
# TODO(sbauza): set_availability_zones returns flat dicts,
# we should rather modify the RPC API to amend service_get_all by
# adding a set_zones argument
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
# NOTE(sbauza): As services is a list of flat dicts, we need to
# rehydrate the corresponding ServiceProxy objects
cell_paths = []
for service in services:
cell_path, id = cells_utils.split_cell_and_item(service['id'])
cell_path, host = cells_utils.split_cell_and_item(
service['host'])
service['id'] = id
service['host'] = host
cell_paths.append(cell_path)
services = obj_base.obj_make_list(context,
objects.ServiceList(),
objects.Service,
services)
services = [cells_utils.ServiceProxy(s, c)
for s, c in zip(services, cell_paths)]
return services
def service_get_by_compute_host(self, context, host_name):
try:
return self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
except exception.CellRoutingInconsistency:
raise exception.ComputeHostNotFound(host=host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
return self.cells_rpcapi.service_update(
context, host_name, binary, params_to_update)
def service_delete(self, context, service_id):
"""Deletes the specified service."""
self.cells_rpcapi.service_delete(context, service_id)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
try:
return self.cells_rpcapi.compute_node_get(context, compute_id)
except exception.CellRoutingInconsistency:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_get_all(self, context, limit=None, marker=None):
# NOTE(lyj): No pagination for cells, just make sure the arguments
# for the method are the same with the compute.api for now.
return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
def compute_node_statistics(self, context):
return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
"""InstanceActionAPI() class for cells."""
def __init__(self):
super(InstanceActionAPI, self).__init__()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def actions_get(self, context, instance):
return self.cells_rpcapi.actions_get(context, instance)
def action_get_by_request_id(self, context, instance, request_id):
return self.cells_rpcapi.action_get_by_request_id(context, instance,
request_id)
def action_events_get(self, context, instance, action_id):
return self.cells_rpcapi.action_events_get(context, instance,
action_id)
# firebird/base.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird
:name: Firebird
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\\
where(empl.c.sales>100).\\
values(dict(salary=empl.c.salary * 1.1))
print raises.fetchall()
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
RESERVED_WORDS = set([
"active", "add", "admin", "after", "all", "alter", "and", "any", "as",
"asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
"bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
"character", "character_length", "char_length", "check", "close",
"collate", "column", "commit", "committed", "computed", "conditional",
"connect", "constraint", "containing", "count", "create", "cross",
"cstring", "current", "current_connection", "current_date",
"current_role", "current_time", "current_timestamp",
"current_transaction", "current_user", "cursor", "database", "date",
"day", "dec", "decimal", "declare", "default", "delete", "desc",
"descending", "disconnect", "distinct", "do", "domain", "double",
"drop", "else", "end", "entry_point", "escape", "exception",
"execute", "exists", "exit", "external", "extract", "fetch", "file",
"filter", "float", "for", "foreign", "from", "full", "function",
"gdscode", "generator", "gen_id", "global", "grant", "group",
"having", "hour", "if", "in", "inactive", "index", "inner",
"input_type", "insensitive", "insert", "int", "integer", "into", "is",
"isolation", "join", "key", "leading", "left", "length", "level",
"like", "long", "lower", "manual", "max", "maximum_segment", "merge",
"min", "minute", "module_name", "month", "names", "national",
"natural", "nchar", "no", "not", "null", "numeric", "octet_length",
"of", "on", "only", "open", "option", "or", "order", "outer",
"output_type", "overflow", "page", "pages", "page_size", "parameter",
"password", "plan", "position", "post_event", "precision", "primary",
"privileges", "procedure", "protected", "rdb$db_key", "read", "real",
"record_version", "recreate", "recursive", "references", "release",
"reserv", "reserving", "retain", "returning_values", "returns",
"revoke", "right", "rollback", "rows", "row_count", "savepoint",
"schema", "second", "segment", "select", "sensitive", "set", "shadow",
"shared", "singular", "size", "smallint", "snapshot", "some", "sort",
"sqlcode", "stability", "start", "starting", "starts", "statistics",
"sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
"to", "trailing", "transaction", "trigger", "trim", "uncommitted",
"union", "unique", "update", "upper", "user", "using", "value",
"values", "varchar", "variable", "varying", "view", "wait", "when",
"where", "while", "with", "work", "write", "year",
])
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {
sqltypes.DateTime: _FBDateTime
}
ischema_names = {
'SHORT': SMALLINT,
'LONG': INTEGER,
'QUAD': FLOAT,
'FLOAT': FLOAT,
'DATE': DATE,
'TIME': TIME,
'TEXT': TEXT,
'INT64': BIGINT,
'DOUBLE': FLOAT,
'TIMESTAMP': TIMESTAMP,
'VARYING': VARCHAR,
'CSTRING': CHAR,
'BLOB': BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_TEXT(self, type_):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, 'charset', None)
if charset is None:
return basic
else:
return '%s CHARACTER SET %s' % (basic, charset)
def visit_CHAR(self, type_):
basic = super(FBTypeCompiler, self).visit_CHAR(type_)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
    # def visit_contains_op_binary(self, binary, operator, **kw):
    #     can't use CONTAINING because it's case insensitive.
    # def visit_notcontains_op_binary(self, binary, operator, **kw):
    #     can't use NOT CONTAINING because it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return '%s STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return '%s NOT STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).\
visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name,
expression._truncated_label) and \
self._truncated_identifier("alias",
alias.name) or alias.name
return self.process(
alias.original, asfrom=asfrom, **kwargs) + \
" " + \
self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit:
result += "FIRST %s " % self.process(sql.literal(select._limit))
if select._offset:
result += "SKIP %s " % self.process(sql.literal(select._offset))
if select._distinct:
result += "DISTINCT "
return result
def limit_clause(self, select):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
        if create.element.start is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support START WITH")
        if create.element.increment is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support INCREMENT BY")
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
else:
return "CREATE GENERATOR %s" % \
self.preparer.format_sequence(create.element)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
else:
return "DROP GENERATOR %s" % \
self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = 'firebird'
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
    # defaults to dialect ver. 3,
    # will be autodetected upon
    # first connect
_version_two = True
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = ('firebird' in self.server_version_info and \
self.server_version_info >= (2, )
) or \
('interbase' in self.server_version_info and \
self.server_version_info >= (6, )
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names['TIMESTAMP'] = sqltypes.DATE
self.colspecs = {
sqltypes.DateTime: sqltypes.DATE
}
self.implicit_returning = self._version_two and \
self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
# that is padded with spaces
name = name and name.rstrip()
if name is None:
return None
elif name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
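    # For example: normalize_name('MY_TABLE ') returns 'my_table' (trailing
    # CHAR padding stripped, all-uppercase unreserved names lowered), while
    # denormalize_name('my_table') returns 'MY_TABLE'; mixed-case or reserved
    # names are passed through unchanged.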
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.execute(tblqry, [self.denormalize_name(table_name)])
return c.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
s = """
SELECT DISTINCT rdb$relation_name
FROM rdb$relation_fields
WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
s = """
SELECT distinct rdb$view_name
FROM rdb$view_relations
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.execute(qry, [self.denormalize_name(view_name)])
row = rp.first()
if row:
return row['view_source']
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
return {'constrained_columns': pkfields, 'name': None}
@reflection.cache
def get_column_sequence(self, connection,
table_name, column_name,
schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
        # Heuristic query to determine the generator associated with a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr['fgenerator']))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
        # get the PK, used to determine any associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint['constrained_columns']
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row['fname'])
orig_colname = row['fname']
# get the data type
colspec = row['ftype'].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn("Did not recognize type '%s' of column '%s'" %
(colspec, name))
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row['fprec'] != 0:
coltype = NUMERIC(
precision=row['fprec'],
scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
elif colspec == 'TEXT':
coltype = TEXT(row['flen'])
elif colspec == 'BLOB':
if row['stype'] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip().upper() == \
'DEFAULT', "Unrecognized default value: %s" % \
defexpr
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
# Redundant
defvalue = None
col_d = {
'name': name,
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
'autoincrement': defvalue is None
}
if orig_colname.lower() == orig_colname:
col_d['quote'] = True
            # if the PK is a single field, try to see if it's linked to
            # a sequence through a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d['sequence'] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(lambda: {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
})
for row in c:
cname = self.normalize_name(row['cname'])
fk = fks[cname]
if not fk['name']:
fk['name'] = cname
fk['referred_table'] = self.normalize_name(row['targetrname'])
fk['constrained_columns'].append(
self.normalize_name(row['fname']))
fk['referred_columns'].append(
self.normalize_name(row['targetfname']))
return fks.values()
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, field_name
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row['index_name']]
if 'name' not in indexrec:
indexrec['name'] = self.normalize_name(row['index_name'])
indexrec['column_names'] = []
indexrec['unique'] = bool(row['unique_flag'])
indexrec['column_names'].append(
self.normalize_name(row['field_name']))
return indexes.values()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from oslo.config import cfg
from heat.tests import fakes
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.engine import environment
from heat.common import identifier
from heat.common import template_format
from heat.engine import parser
from heat.engine import scheduler
from heat.engine import service
from heat.engine.resources import instance
from heat.engine.resources import wait_condition as wc
test_template_metadata = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"S1": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"files" : {
"/tmp/random_file" : {
"content" : { "Fn::Join" : ["", [
"s2-ip=", {"Fn::GetAtt": ["S2", "PublicIp"]}
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
test_template_waitcondition = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a WaitCondition.",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"WH" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"S1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : { "Fn::Join" : [ "", [ "#!/bin/bash -v\n",
"echo ",
{ "Ref" : "WH" },
"\n" ] ] }
}
},
"WC" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn": "S1",
"Properties" : {
"Handle" : {"Ref" : "WH"},
"Timeout" : "5"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"test" : {"Fn::GetAtt": ["WC", "Data"]}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
class MetadataRefreshTest(HeatTestCase):
    '''
    The point of the test is to confirm that the instance metadata gets
    updated when FnGetAtt() returns something different and
    metadata_update() gets called.
    '''
def setUp(self):
super(MetadataRefreshTest, self).setUp()
self.fc = fakes.FakeKeystoneClient()
utils.setup_dummy_db()
# Note tests creating a stack should be decorated with @stack_delete_after
# to ensure the stack is properly cleaned up
def create_stack(self, stack_name='test_stack', params={}):
temp = template_format.parse(test_template_metadata)
template = parser.Template(temp)
ctx = utils.dummy_context()
stack = parser.Stack(ctx, stack_name, template,
environment.Environment(params),
disable_rollback=True)
self.stack_id = stack.store()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
for cookie in (object(), object()):
instance.Instance.handle_create().AndReturn(cookie)
create_complete = instance.Instance.check_create_complete(cookie)
create_complete.InAnyOrder().AndReturn(True)
self.m.StubOutWithMock(instance.Instance, 'FnGetAtt')
return stack
@utils.stack_delete_after
def test_FnGetAtt(self):
self.stack = self.create_stack()
instance.Instance.FnGetAtt('PublicIp').AndReturn('1.2.3.5')
# called by metadata_update()
instance.Instance.FnGetAtt('PublicIp').AndReturn('10.0.0.5')
self.m.ReplayAll()
self.stack.create()
self.assertEqual(self.stack.state,
(self.stack.CREATE, self.stack.COMPLETE))
s1 = self.stack['S1']
s2 = self.stack['S2']
files = s1.metadata['AWS::CloudFormation::Init']['config']['files']
cont = files['/tmp/random_file']['content']
self.assertEqual((s2.CREATE, s2.COMPLETE), s2.state)
self.assertEqual(cont, 's2-ip=1.2.3.5')
s1.metadata_update()
s2.metadata_update()
files = s1.metadata['AWS::CloudFormation::Init']['config']['files']
cont = files['/tmp/random_file']['content']
self.assertEqual(cont, 's2-ip=10.0.0.5')
self.m.VerifyAll()
class WaitCondMetadataUpdateTest(HeatTestCase):
def setUp(self):
super(WaitCondMetadataUpdateTest, self).setUp()
utils.setup_dummy_db()
self.fc = fakes.FakeKeystoneClient()
self.man = service.EngineService('a-host', 'a-topic')
cfg.CONF.set_default('heat_waitcondition_server_url',
'http://server.test:8000/v1/waitcondition')
# Note tests creating a stack should be decorated with @stack_delete_after
# to ensure the stack is properly cleaned up
def create_stack(self, stack_name='test_stack'):
temp = template_format.parse(test_template_waitcondition)
template = parser.Template(temp)
ctx = utils.dummy_context()
stack = parser.Stack(ctx, stack_name, template, disable_rollback=True)
self.stack_id = stack.store()
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
for cookie in (object(), object()):
instance.Instance.handle_create().AndReturn(cookie)
instance.Instance.check_create_complete(cookie).AndReturn(True)
self.m.StubOutWithMock(wc.WaitConditionHandle, 'keystone')
wc.WaitConditionHandle.keystone().MultipleTimes().AndReturn(self.fc)
id = identifier.ResourceIdentifier('test_tenant_id', stack.name,
stack.id, '', 'WH')
self.m.StubOutWithMock(wc.WaitConditionHandle, 'identifier')
wc.WaitConditionHandle.identifier().MultipleTimes().AndReturn(id)
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
self.m.StubOutWithMock(service.EngineService, '_load_user_creds')
service.EngineService._load_user_creds(
mox.IgnoreArg()).MultipleTimes().AndReturn(ctx)
return stack
@utils.stack_delete_after
def test_wait_meta(self):
'''
1 create stack
2 assert empty instance metadata
3 service.metadata_update()
4 assert valid waitcond metadata
5 assert valid instance metadata
'''
self.stack = self.create_stack()
watch = self.stack['WC']
inst = self.stack['S2']
def check_empty(sleep_time):
self.assertEqual(watch.FnGetAtt('Data'), '{}')
self.assertEqual(inst.metadata['test'], None)
def update_metadata(id, data, reason):
self.man.metadata_update(utils.dummy_context(),
dict(self.stack.identifier()),
'WH',
{'Data': data, 'Reason': reason,
'Status': 'SUCCESS', 'UniqueId': id})
def post_success(sleep_time):
update_metadata('123', 'foo', 'bar')
scheduler.TaskRunner._sleep(mox.IsA(int)).WithSideEffects(check_empty)
scheduler.TaskRunner._sleep(mox.IsA(int)).WithSideEffects(post_success)
scheduler.TaskRunner._sleep(mox.IsA(int)).MultipleTimes().AndReturn(
None)
self.m.ReplayAll()
self.stack.create()
self.assertEqual(self.stack.state,
(self.stack.CREATE, self.stack.COMPLETE))
self.assertEqual(watch.FnGetAtt('Data'), '{"123": "foo"}')
self.assertEqual(inst.metadata['test'], '{"123": "foo"}')
update_metadata('456', 'blarg', 'wibble')
self.assertEqual(watch.FnGetAtt('Data'),
'{"123": "foo", "456": "blarg"}')
self.assertEqual(inst.metadata['test'],
'{"123": "foo", "456": "blarg"}')
self.m.VerifyAll()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"managementPolicyName": _SERIALIZER.url("management_policy_name", management_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"managementPolicyName": _SERIALIZER.url("management_policy_name", management_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"managementPolicyName": _SERIALIZER.url("management_policy_name", management_policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
class ManagementPoliciesOperations(object):
"""ManagementPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
**kwargs: Any
) -> "_models.ManagementPolicy":
"""Gets the managementpolicy associated with the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'.
:type management_policy_name: str or
~azure.mgmt.storage.v2021_06_01.models.ManagementPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_06_01.models.ManagementPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
management_policy_name=management_policy_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
properties: "_models.ManagementPolicy",
**kwargs: Any
) -> "_models.ManagementPolicy":
"""Sets the managementpolicy to the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'.
:type management_policy_name: str or
~azure.mgmt.storage.v2021_06_01.models.ManagementPolicyName
:param properties: The ManagementPolicy set to a storage account.
:type properties: ~azure.mgmt.storage.v2021_06_01.models.ManagementPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_06_01.models.ManagementPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagementPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(properties, 'ManagementPolicy')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
management_policy_name=management_policy_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, "_models.ManagementPolicyName"],
**kwargs: Any
) -> None:
"""Deletes the managementpolicy associated with the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'.
:type management_policy_name: str or
~azure.mgmt.storage.v2021_06_01.models.ManagementPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
management_policy_name=management_policy_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'} # type: ignore
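# Illustrative usage sketch (not part of the generated module). It assumes the
# azure-identity and azure-mgmt-storage packages are available and that the
# subscription / resource group / account names below are placeholders; as the
# class docstring notes, this operation group is normally reached through a
# client instance rather than instantiated directly.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.storage import StorageManagementClient

    client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
    policy = client.management_policies.get(
        resource_group_name="<resource-group>",
        account_name="<storage-account>",
        management_policy_name="default",
    )
    print(policy.policy)  # the ManagementPolicySchema holding the lifecycle rules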
|
|
# Natural Language Toolkit: Interface to the Mace4 Model Builder
#
# Author: Dan Garrette <dhgarrette@gmail.com>
# Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A model builder that makes use of the external 'Mace4' package.
"""
from __future__ import print_function
import os
import tempfile
from nltk.sem.logic import is_indvar
from nltk.sem import Valuation, Expression
from nltk.inference.api import ModelBuilder, BaseModelBuilderCommand
from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand):
"""
A ``MaceCommand`` specific to the ``Mace`` model builder. It contains
a print_assumptions() method that is used to print the list
of assumptions in multiple formats.
"""
_interpformat_bin = None
def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None):
"""
:param goal: Input expression to prove
:type goal: sem.Expression
:param assumptions: Input expressions to use as assumptions in
the proof.
:type assumptions: list(sem.Expression)
:param max_models: The maximum number of models that Mace will try before
simply returning false. (Use 0 for no maximum.)
:type max_models: int
"""
if model_builder is not None:
assert isinstance(model_builder, Mace)
else:
model_builder = Mace(max_models)
BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions)
    @property
    def valuation(self):
        return self.model('valuation')
def _convert2val(self, valuation_str):
"""
Transform the output file into an NLTK-style Valuation.
:return: A model if one is generated; None otherwise.
:rtype: sem.Valuation
"""
valuation_standard_format = self._transform_output(valuation_str, 'standard')
val = []
for line in valuation_standard_format.splitlines(False):
l = line.strip()
if l.startswith('interpretation'):
# find the number of entities in the model
num_entities = int(l[l.index('(')+1:l.index(',')].strip())
elif l.startswith('function') and l.find('_') == -1:
# replace the integer identifier with a corresponding alphabetic character
name = l[l.index('(')+1:l.index(',')].strip()
if is_indvar(name):
name = name.upper()
value = int(l[l.index('[')+1:l.index(']')].strip())
val.append((name, MaceCommand._make_model_var(value)))
elif l.startswith('relation'):
l = l[l.index('(')+1:]
if '(' in l:
#relation is not nullary
name = l[:l.index('(')].strip()
values = [int(v.strip()) for v in l[l.index('[')+1:l.index(']')].split(',')]
val.append((name, MaceCommand._make_relation_set(num_entities, values)))
else:
#relation is nullary
name = l[:l.index(',')].strip()
value = int(l[l.index('[')+1:l.index(']')].strip())
val.append((name, value == 1))
return Valuation(val)
@staticmethod
def _make_relation_set(num_entities, values):
"""
        Convert a Mace4-style relation table into a set of tuples.
:param num_entities: the number of entities in the model; determines the row length in the table.
:type num_entities: int
:param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model.
:type values: list of int
"""
r = set()
for position in [pos for (pos,v) in enumerate(values) if v == 1]:
r.add(tuple(MaceCommand._make_relation_tuple(position, values, num_entities)))
return r
@staticmethod
def _make_relation_tuple(position, values, num_entities):
if len(values) == 1:
return []
else:
sublist_size = len(values) // num_entities
sublist_start = position // sublist_size
sublist_position = int(position % sublist_size)
sublist = values[sublist_start*sublist_size:(sublist_start+1)*sublist_size]
return [MaceCommand._make_model_var(sublist_start)] + \
MaceCommand._make_relation_tuple(sublist_position,
sublist,
num_entities)
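    # Worked example (a sketch mirroring test_make_relation_set below): with
    # num_entities=3 and the binary-relation table values=[0,0,0,0,0,0,1,0,0],
    # the single 1 sits at position 6. sublist_size = 9 // 3 = 3, so
    # sublist_start = 6 // 3 = 2 (entity 'c') and the recursion continues on
    # the sublist [1, 0, 0] at position 0, which yields 'a'; the decoded tuple
    # is therefore ('c', 'a').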
@staticmethod
def _make_model_var(value):
"""
Pick an alphabetic character as identifier for an entity in the model.
:param value: where to index into the list of characters
:type value: int
"""
letter = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n',
'o','p','q','r','s','t','u','v','w','x','y','z'][value]
num = value // 26
return (letter + str(num) if num > 0 else letter)
def _decorate_model(self, valuation_str, format):
"""
Print out a Mace4 model using any Mace4 ``interpformat`` format.
See http://www.cs.unm.edu/~mccune/mace4/manual/ for details.
:param valuation_str: str with the model builder's output
:param format: str indicating the format for displaying
models. Defaults to 'standard' format.
:return: str
"""
if not format:
return valuation_str
elif format == 'valuation':
return self._convert2val(valuation_str)
else:
return self._transform_output(valuation_str, format)
def _transform_output(self, valuation_str, format):
"""
Transform the output file into any Mace4 ``interpformat`` format.
:param format: Output format for displaying models.
:type format: str
"""
if format in ['standard', 'standard2', 'portable', 'tabular',
'raw', 'cooked', 'xml', 'tex']:
return self._call_interpformat(valuation_str, [format])[0]
else:
raise LookupError("The specified format does not exist")
def _call_interpformat(self, input_str, args=[], verbose=False):
"""
Call the ``interpformat`` binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if self._interpformat_bin is None:
self._interpformat_bin = self._modelbuilder._find_binary(
'interpformat', verbose)
return self._modelbuilder._call(input_str, self._interpformat_bin,
args, verbose)
class Mace(Prover9Parent, ModelBuilder):
_mace4_bin = None
def __init__(self, end_size=500):
self._end_size = end_size
"""The maximum model size that Mace will try before
simply returning false. (Use -1 for no maximum.)"""
def _build_model(self, goal=None, assumptions=None, verbose=False):
"""
Use Mace4 to build a first order model.
:return: ``True`` if a model was found (i.e. Mace returns value of 0),
else ``False``
"""
if not assumptions:
assumptions = []
stdout, returncode = self._call_mace4(self.prover9_input(goal, assumptions),
verbose=verbose)
return (returncode == 0, stdout)
def _call_mace4(self, input_str, args=[], verbose=False):
"""
Call the ``mace4`` binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if self._mace4_bin is None:
self._mace4_bin = self._find_binary('mace4', verbose)
updated_input_str = ''
if self._end_size > 0:
updated_input_str += 'assign(end_size, %d).\n\n' % self._end_size
updated_input_str += input_str
return self._call(updated_input_str, self._mace4_bin, args, verbose)
def spacer(num=30):
print('-' * num)
def decode_result(found):
"""
Decode the result of model_found()
:param found: The output of model_found()
:type found: bool
"""
return {True: 'Countermodel found', False: 'No countermodel found', None: 'None'}[found]
def test_model_found(arguments):
"""
Try some proofs and exhibit the results.
"""
for (goal, assumptions) in arguments:
g = Expression.fromstring(goal)
        alist = [Expression.fromstring(a) for a in assumptions]
m = MaceCommand(g, assumptions=alist, max_models=50)
found = m.build_model()
for a in alist:
print(' %s' % a)
print('|- %s: %s\n' % (g, decode_result(found)))
def test_build_model(arguments):
"""
Try to build a ``nltk.sem.Valuation``.
"""
g = Expression.fromstring('all x.man(x)')
alist = [Expression.fromstring(a) for a in ['man(John)',
'man(Socrates)',
'man(Bill)',
'some x.(-(x = John) & man(x) & sees(John,x))',
'some x.(-(x = Bill) & man(x))',
'all x.some y.(man(x) -> gives(Socrates,x,y))']]
m = MaceCommand(g, assumptions=alist)
m.build_model()
spacer()
print("Assumptions and Goal")
spacer()
for a in alist:
print(' %s' % a)
print('|- %s: %s\n' % (g, decode_result(m.build_model())))
spacer()
#print m.model('standard')
#print m.model('cooked')
print("Valuation")
spacer()
print(m.valuation, '\n')
def test_transform_output(argument_pair):
"""
Transform the model into various Mace4 ``interpformat`` formats.
"""
g = Expression.fromstring(argument_pair[0])
    alist = [Expression.fromstring(a) for a in argument_pair[1]]
m = MaceCommand(g, assumptions=alist)
m.build_model()
for a in alist:
print(' %s' % a)
print('|- %s: %s\n' % (g, m.build_model()))
for format in ['standard', 'portable', 'xml', 'cooked']:
spacer()
print("Using '%s' format" % format)
spacer()
print(m.model(format=format))
def test_make_relation_set():
print(MaceCommand._make_relation_set(num_entities=3, values=[1,0,1]) == set([('c',), ('a',)]))
print(MaceCommand._make_relation_set(num_entities=3, values=[0,0,0,0,0,0,1,0,0]) == set([('c', 'a')]))
print(MaceCommand._make_relation_set(num_entities=2, values=[0,0,1,0,0,0,1,0]) == set([('a', 'b', 'a'), ('b', 'b', 'a')]))
arguments = [
('mortal(Socrates)', ['all x.(man(x) -> mortal(x))', 'man(Socrates)']),
('(not mortal(Socrates))', ['all x.(man(x) -> mortal(x))', 'man(Socrates)'])
]
def demo():
test_model_found(arguments)
test_build_model(arguments)
test_transform_output(arguments[1])
if __name__ == '__main__':
demo()
|
|
"""
Cart pole swing-up: Original version from:
https://github.com/zuoxingdong/DeepPILCO/blob/master/cartpole_swingup.py
Modified so that done=True when x is outside of -2.4 to 2.4
Reward is also reshaped to be similar to PyBullet/roboschool version
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
# logger = logging.getLogger(__name__)
class CartPoleSwingUpHarderEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self, simple=False, redundant_obs=False):
self.simple = simple
self.redundant_obs = redundant_obs
self.g = 9.82 # gravity
self.m_c = 0.5 # cart mass
self.m_p = 0.5 # pendulum mass
self.total_m = (self.m_p + self.m_c)
self.l = 0.6 # pole's length
self.m_p_l = (self.m_p * self.l)
self.force_mag = 10.0
self.dt = 0.01 # seconds between state updates
self.b = 0.1 # friction coefficient
self.t = 0 # timestep
self.t_limit = 1000
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
high = np.array([
np.finfo(np.float32).max,
np.finfo(np.float32).max,
np.finfo(np.float32).max,
np.finfo(np.float32).max,
np.finfo(np.float32).max])
if self.redundant_obs:
high = np.concatenate([high] * 2, axis=0)
self.action_space = spaces.Box(-1.0, 1.0, shape=(1,))
self.observation_space = spaces.Box(-high, high)
self.np_random = None
self.seed()
self.viewer = None
self.state = None
self.prev_state = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
# Valid action
action = np.clip(action, -1.0, 1.0)[0]
action *= self.force_mag
state = self.state
x, x_dot, theta, theta_dot = state
s = math.sin(theta)
c = math.cos(theta)
xdot_update = (
(-2 * self.m_p_l * (theta_dot ** 2) * s +
3 * self.m_p * self.g * s * c +
4 * action - 4 * self.b * x_dot) /
(4 * self.total_m - 3 * self.m_p * c ** 2)
)
thetadot_update = (
(-3 * self.m_p_l * (theta_dot ** 2) * s * c +
6 * self.total_m * self.g * s +
6 * (action - self.b * x_dot) * c) /
(4 * self.l * self.total_m - 3 * self.m_p_l * c ** 2)
)
x = x + x_dot * self.dt
theta = theta + theta_dot * self.dt
x_dot = x_dot + xdot_update * self.dt
theta_dot = theta_dot + thetadot_update * self.dt
self.state = (x, x_dot, theta, theta_dot)
done = False
if x < -self.x_threshold or x > self.x_threshold:
done = True
self.t += 1
if self.t >= self.t_limit:
done = True
reward_theta = (np.cos(theta) + 1.0) / 2.0
reward_x = np.cos((x / self.x_threshold) * (np.pi / 2.0))
reward = reward_theta * reward_x
prev_x, prev_x_dot, prev_theta, prev_theta_dot = self.prev_state
c = np.cos(theta)
s = np.sin(theta)
# prev_c = np.cos(prev_theta)
# prev_s = np.sin(prev_theta)
# print("debug", theta-prev_theta, theta, prev_theta)
# obs = np.array([x, (x-prev_x)/self.dt, c, s, (theta-prev_theta)/self.dt])
obs = np.array([x, x_dot, c, s, theta_dot])
# obs = np.array([x, x_dot, theta, theta_dot])
if self.redundant_obs:
obs = np.concatenate([obs] * 2, axis=0)
self.prev_state = self.state
return obs, reward, done, {}
def reset(self):
if self.simple:
self.state = self.np_random.normal(
loc=np.array([0.0, 0.0, np.pi, 0.0]),
scale=np.array([0.2, 0.2, 0.2, 0.2]),
)
else:
[rand_x, rand_x_dot, rand_theta, rand_theta_dot] = np.multiply(
self.np_random.rand(4) * 2 - 1,
[self.x_threshold, 10., np.pi / 2., 10.])
self.state = np.array(
[rand_x, rand_x_dot, np.pi + rand_theta, rand_theta_dot])
self.prev_state = self.state
self.t = 0 # timestep
x, x_dot, theta, theta_dot = self.state
obs = np.array([x, x_dot, np.cos(theta), np.sin(theta),
theta_dot]) # set zero for init differences
# obs = np.array([x, x_dot, theta, theta_dot]) # set zero for init
# differences
if self.redundant_obs:
obs = np.concatenate([obs] * 2, axis=0)
return obs
def render(self, mode='human', close=False, override_state=None):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
if self.state is None: return None
screen_width = 600
screen_height = 600 # before was 400
world_width = 5 # max visible position of cart
scale = screen_width / world_width
carty = screen_height / 2 # TOP OF CART
polewidth = 6.0
polelen = scale * self.l # 0.6 or self.l
cartwidth = 40.0
cartheight = 20.0
extra_color = 0.0
        if override_state is not None:
extra_color = 0.75
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
# real cart
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
self.cart = rendering.FilledPolygon(
[(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
self.cart.add_attr(self.carttrans)
self.cart.set_color(1.0, extra_color, extra_color)
self.viewer.add_geom(self.cart)
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
self.pole = rendering.FilledPolygon(
[(l, b), (l, t), (r, t), (r, b)])
self.pole.set_color(extra_color, extra_color, 1)
self.poletrans = rendering.Transform(translation=(0, 0))
self.pole.add_attr(self.poletrans)
self.pole.add_attr(self.carttrans)
self.viewer.add_geom(self.pole)
self.axle = rendering.make_circle(polewidth / 2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(0.1, 1, 1)
self.viewer.add_geom(self.axle)
# Make another circle on the top of the pole
self.pole_bob = rendering.make_circle(polewidth / 2)
self.pole_bob_trans = rendering.Transform()
self.pole_bob.add_attr(self.pole_bob_trans)
self.pole_bob.add_attr(self.poletrans)
self.pole_bob.add_attr(self.carttrans)
self.pole_bob.set_color(0, 0, 0)
self.viewer.add_geom(self.pole_bob)
self.wheel_l = rendering.make_circle(cartheight / 4)
self.wheel_r = rendering.make_circle(cartheight / 4)
self.wheeltrans_l = rendering.Transform(
translation=(-cartwidth / 2, -cartheight / 2))
self.wheeltrans_r = rendering.Transform(
translation=(cartwidth / 2, -cartheight / 2))
self.wheel_l.add_attr(self.wheeltrans_l)
self.wheel_l.add_attr(self.carttrans)
self.wheel_r.add_attr(self.wheeltrans_r)
self.wheel_r.add_attr(self.carttrans)
            self.wheel_l.set_color(0, 0, 0)  # black (R, G, B)
            self.wheel_r.set_color(0, 0, 0)  # black (R, G, B)
self.viewer.add_geom(self.wheel_l)
self.viewer.add_geom(self.wheel_r)
# dream cart
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
dream_cart = rendering.PolyLine([(l, b), (l, t), (r, t), (r, b)],
True)
self.dream_carttrans = rendering.Transform()
dream_cart.add_attr(self.dream_carttrans)
dream_cart.set_color(0.25, 0.25, 0.25)
self.viewer.add_geom(dream_cart)
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
dream_pole = rendering.PolyLine([(l, b), (l, t), (r, t), (r, b)],
True)
dream_pole.set_color(0.25, 0.25, 0.25)
self.dream_poletrans = rendering.Transform(translation=(0, 0))
dream_pole.add_attr(self.dream_poletrans)
dream_pole.add_attr(self.dream_carttrans)
self.viewer.add_geom(dream_pole)
self.dream_axle = rendering.make_circle(polewidth / 2, filled=False)
self.dream_axle.add_attr(self.dream_poletrans)
self.dream_axle.add_attr(self.dream_carttrans)
self.dream_axle.set_color(0.1, .25, .25)
self.viewer.add_geom(self.dream_axle)
# Make another circle on the top of the pole
self.dream_pole_bob = rendering.make_circle(polewidth / 2,
filled=False)
self.dream_pole_bob_trans = rendering.Transform()
self.dream_pole_bob.add_attr(self.dream_pole_bob_trans)
self.dream_pole_bob.add_attr(self.dream_poletrans)
self.dream_pole_bob.add_attr(self.dream_carttrans)
self.dream_pole_bob.set_color(0.25, 0.25, 0.25)
self.viewer.add_geom(self.dream_pole_bob)
self.dream_wheel_l = rendering.make_circle(
cartheight / 4, filled=False)
self.dream_wheel_r = rendering.make_circle(
cartheight / 4, filled=False)
self.dream_wheeltrans_l = rendering.Transform(
translation=(-cartwidth / 2, -cartheight / 2))
self.dream_wheeltrans_r = rendering.Transform(
translation=(cartwidth / 2, -cartheight / 2))
self.dream_wheel_l.add_attr(self.dream_wheeltrans_l)
self.dream_wheel_l.add_attr(self.dream_carttrans)
self.dream_wheel_r.add_attr(self.dream_wheeltrans_r)
self.dream_wheel_r.add_attr(self.dream_carttrans)
self.dream_wheel_l.set_color(0.25, 0.25, 0.25)
self.dream_wheel_r.set_color(0.25, 0.25, 0.25)
self.viewer.add_geom(self.dream_wheel_l)
self.viewer.add_geom(self.dream_wheel_r)
# others:
self.track = rendering.Line(
(screen_width / 2 - self.x_threshold * scale,
carty - cartheight / 2 - cartheight / 4),
(screen_width / 2 + self.x_threshold * scale,
carty - cartheight / 2 - cartheight / 4)
)
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
x = self.state
dream_x = self.state
        if override_state is not None:
dream_x = override_state
# flash when we peek
self.cart.set_color(1.0, extra_color, extra_color)
self.pole.set_color(extra_color, extra_color, 1)
# real cart
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(x[2])
self.pole_bob_trans.set_translation(-self.l * np.sin(x[2]),
self.l * np.cos(x[2]))
# dream cart
dream_cartx = dream_x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.dream_carttrans.set_translation(dream_cartx, carty)
self.dream_poletrans.set_rotation(dream_x[2])
self.dream_pole_bob_trans.set_translation(-self.l * np.sin(dream_x[2]),
self.l * np.cos(dream_x[2]))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
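# Minimal usage sketch (assumes the pre-0.26 gym API implemented above, where
# reset() returns only the observation and step() returns a 4-tuple); the
# random policy is purely illustrative.
if __name__ == '__main__':
    env = CartPoleSwingUpHarderEnv(simple=True)
    obs = env.reset()
    total_reward, done = 0.0, False
    while not done:
        action = env.action_space.sample()  # Box(-1, 1) force command
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print('episode finished after %d steps, return %.2f' % (env.t, total_reward))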
|
|
"""Freezer swift.py related tests
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import io
import os
import fixtures
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
import testtools
from freezer_api.common import config
from freezer_api import policy
CONF = cfg.CONF
fake_data_0_backup_id = 'b740ed9ad2b646aba304ef54c21c6774'
fake_data_0_user_id = 'qwerty1234'
fake_data_0_user_name = 'asdffdsa'
fake_data_0_wrapped_backup_metadata = {
'backup_id': 'b740ed9ad2b646aba304ef54c21c6774',
'user_id': 'qwerty1234',
'user_name': 'asdffdsa',
'backup_metadata': {
"container": "freezer_container",
"hostname": "alpha",
"backup_name": "important_data_backup",
"time_stamp": 8475903425,
"curr_backup_level": 0,
"backup_session": 8475903425,
"max_level": 5,
"mode": "fs",
"fs_real_path": "/blabla",
"vol_snap_path": "/blablasnap",
"total_broken_links": 0,
"total_fs_files": 11,
"total_directories": 2,
"backup_size_uncompressed": 4567,
"backup_size_compressed": 1212,
"total_backup_session_size": 6789,
"compression_alg": "None",
"encrypted": "false",
"client_os": "linux",
"broken_links": ["link_01", "link_02"],
"excluded_files": ["excluded_file_01", "excluded_file_02"],
"cli": ""
}
}
fake_data_0_backup_metadata = {
"container": "freezer_container",
"hostname": "alpha",
"backup_name": "important_data_backup",
"time_stamp": 8475903425,
"curr_backup_level": 0,
"backup_session": 8475903425,
"max_level": 5,
"mode": "fs",
"fs_real_path": "/blabla",
"vol_snap_path": "/blablasnap",
"total_broken_links": 0,
"total_fs_files": 11,
"total_directories": 2,
"backup_size_uncompressed": 4567,
"backup_size_compressed": 1212,
"total_backup_session_size": 6789,
"compression_alg": "None",
"encrypted": "false",
"client_os": "linux",
"broken_links": ["link_01", "link_02"],
"excluded_files": ["excluded_file_01", "excluded_file_02"],
"cli": ""
}
fake_malformed_data_0_backup_metadata = {
"hostname": "alpha",
"backup_name": "important_data_backup",
"time_stamp": 8475903425,
"curr_backup_level": 0,
"backup_session": 8475903425,
"max_level": 5,
"mode": "fs",
"fs_real_path": "/blabla",
"vol_snap_path": "/blablasnap",
"total_broken_links": 0,
"total_fs_files": 11,
"total_directories": 2,
"backup_size_uncompressed": 4567,
"backup_size_compressed": 1212,
"total_backup_session_size": 6789,
"compression_alg": "None",
"encrypted": "false",
"client_os": "linux",
"broken_links": ["link_01", "link_02"],
"excluded_files": ["excluded_file_01", "excluded_file_02"],
"cli": ""
}
fake_data_0_elasticsearch_hit = {
"_shards": {
"failed": 0,
"successful": 5,
"total": 5
},
"hits": {
"hits": [
{
"_id": "AUx_iu-ewlhuOVELWtH0",
"_index": "freezer",
"_score": 1.0,
"_type": "backups",
"_source": {
"container": "freezer_container",
"hostname": "alpha",
"backup_name": "important_data_backup",
"time_stamp": 8475903425,
"curr_backup_level": 0,
"backup_session": 8475903425,
"max_level": 5,
"mode": "fs",
"fs_real_path": "/blabla",
"vol_snap_path": "/blablasnap",
"total_broken_links": 0,
"total_fs_files": 11,
"total_directories": 2,
"backup_size_uncompressed": 4567,
"backup_size_compressed": 1212,
"total_backup_session_size": 6789,
"compression_alg": "None",
"encrypted": "false",
"client_os": "linux",
"broken_links": ["link_01", "link_02"],
"excluded_files": ["excluded_file_01", "excluded_file_02"],
"cli": ""
}
}
],
"max_score": 1.0,
"total": 1
},
"timed_out": False,
"took": 3
}
fake_data_0_elasticsearch_miss = {
"_shards": {
"failed": 0,
"successful": 5,
"total": 5
},
"hits": {
"hits": [],
"max_score": None,
"total": 0
},
"timed_out": False,
"took": 1
}
fake_job_0_user_id = "f4db4da085f043059441565720b217c7"
fake_job_0_job_id = "e7181e5e-2c75-43f8-92c0-c037ae5f11e4"
fake_job_0_elasticsearch_not_found = {
"_id": "e7181e5e-2c75-43f8-92c0-c037ae5f11e43",
"_index": "freezer",
"_type": "job",
"found": False
}
fake_job_0 = {
"job_actions": [
{
"freezer_action": {
"action": "backup",
"mode": "fs",
"src_file": "/home/tylerdurden/project_mayhem",
"backup_name": "project_mayhem_backup",
"container": "my_backup_container"
},
"max_retries": 3,
"max_retries_interval": 60,
"mandatory": False
},
{
"freezer_action": {
"action": "restore",
"mode": "fs",
"restore_abs_path": "/home/tylerdurden/project_mayhem",
"restore_from_host": "node_on_which_backup_was_made",
"backup_name": "project_mayhem_backup",
"container": "my_backup_container"
},
"max_retries": 3,
"max_retries_interval": 60,
"mandatory": True
}
],
"job_schedule": {
"time_created": 1234,
"time_started": 1234,
"time_ended": 1234,
"status": "stop",
"result": "success",
"schedule_date": "2015-06-02T16:20:00",
"schedule_interval": "2 days"
},
"job_id": "e7181e5e-2c75-43f8-92c0-c037ae5f11e4",
"client_id": "mytenantid_myhostname",
"user_id": "f4db4da085f043059441565720b217c7",
"description": "test action 4"
}
def get_fake_job_0():
return copy.deepcopy(fake_job_0)
def get_fake_job_1():
job = copy.deepcopy(fake_job_0)
job["job_id"] = 'pqoqurioew'
return job
fake_job_0_elasticsearch_found = {
"_id": "e7181e5e-2c75-43f8-92c0-c037ae5f11e4",
"_index": "freezer",
"_source": fake_job_0,
"_type": "actions",
"_version": 1,
"found": True
}
fake_data_1_wrapped_backup_metadata = {
'backup_id': 'b740ed9ad2b646aba304ef54c21c6774',
'user_id': 'qwerty1234',
'user_name': 'asdffdsa',
'backup_metadata': {
"container": "freezer_container",
"hostname": "alpha",
"backup_name": "important_data_backup",
"time_stamp": 125235431,
"curr_backup_level": 1,
"backup_session": 8475903425,
"max_level": 5,
"mode": "fs",
"fs_real_path": "/blabla",
"vol_snap_path": "/blablasnap",
"total_broken_links": 0,
"total_fs_files": 11,
"total_directories": 2,
"backup_size_uncompressed": 4567,
"backup_size_compressed": 1212,
"total_backup_session_size": 6789,
"compression_alg": "None",
"encrypted": "false",
"client_os": "linux",
"broken_links": ["link_01", "link_02"],
"excluded_files": ["excluded_file_01", "excluded_file_02"],
"cli": ""
}
}
fake_client_info_0 = {
"client_id": "test-tenant_5253_test-hostname_09544",
"description": "some usefule text here",
"config_id": "config_id_contains_uuid_of_config"
}
fake_client_info_1 = {
"client_id": "test-tenant_5253_test-hostname_6543",
"description": "also some useful text here",
"config_id": "config_id_blablawhatever"
}
fake_client_entry_0 = {
"client": fake_client_info_0,
"user_id": "user_id-is-provided-keystone"
}
fake_client_entry_1 = {
"client": fake_client_info_0,
"user_id": "user_id-is-provided-keystone"
}
fake_action_0 = {
"freezer_action":
{
"action": "backup",
"mode": "fs",
"src_file": "/home/tylerdurden/project_mayhem",
"backup_name": "project_mayhem_backup",
"container": "my_backup_container",
},
"exit_status": "success",
"max_retries": 3,
"max_retries_interval": 60,
"mandatory": True,
"action_id": "qwerqwerqwerrewq",
"user_id": "user_id-is-provided-by-keystone"
}
fake_action_1 = {
"freezer_action":
{
"action": "backup",
"mode": "fs",
"src_file": "/home/tylerdurden/project_mayhem",
"backup_name": "project_mayhem_backup",
"container": "my_backup_container",
},
"exit_status": "success",
"max_retries": 3,
"max_retries_interval": 60,
"mandatory": True,
"action_id": "jk4lkjbn4r3k",
"user_id": "user_id-is-provided-by-keystone"
}
def get_fake_action_0():
return copy.deepcopy(fake_action_0)
def get_fake_action_1():
return copy.deepcopy(fake_action_1)
fake_session_0 = {
"session_id": 'turistidellademocrazia',
"session_tag": 5,
"description": 'some text here',
"hold_off": 60,
"schedule": {
"time_created": 1234,
"time_started": 1234,
"time_ended": 0,
"status": "stop",
"schedule_date": "2015-06-02T16:20:00"
},
"jobs": {
'venerescollataincorpodalolita': {
"client_id": 'bruco',
"status": 'running',
"start_time": 12344321,
},
'job_id_2': {
"client_id": "cocktail",
"status": 'completed',
"result": 'success',
"start_time": 123321,
"end_time": 123325,
}
},
"time_start": 123412344,
"time_end": 432234432,
"status": "running",
"user_id": "califfo"
}
fake_session_1 = {
"session_id": 'turistidellademocrazia',
"session_tag": 5,
"description": 'some text here',
"hold_off": 60,
"schedule": {
"time_created": 1234,
"time_started": 1234,
"time_ended": 0,
"status": "stop",
"schedule_date": "2015-06-02T16:20:00"
},
"jobs": {
'venerescollataincorpodalolita': {
"client_id": 'bruco',
"status": 'running',
"start_time": 12344321,
}
},
"time_start": 123412344,
"time_end": 432234432,
"status": "running",
"user_id": "califfo"
}
def get_fake_session_0():
return copy.deepcopy(fake_session_0)
def get_fake_session_1():
return copy.deepcopy(fake_session_1)
class FakeReqResp(object):
    def __init__(self, method='GET', body=b''):
self.method = method
self.body = body
self.stream = io.BytesIO(body)
self.content_length = len(body)
self.context = {}
self.header = {}
def get_header(self, key):
return self.header.get(key, None)
class FreezerBaseTestCase(testtools.TestCase):
def setUp(self):
super(FreezerBaseTestCase, self).setUp()
self._config_fixture = self.useFixture(cfg_fixture.Config())
config.parse_args(args=[])
self.addCleanup(CONF.reset)
self.test_dir = self.useFixture(fixtures.TempDir()).path
self.conf_dir = os.path.join(self.test_dir, 'etc')
os.makedirs(self.conf_dir)
policy.ENFORCER = None
policy.setup_policy(CONF)
class FakeContext(object):
def __init__(self, *args, **kwargs):
self.context = {}
def to_dict(self):
return self.context
def get_req_items(name):
req_info = {'freezer.context': FakeContext()}
return req_info[name]
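# Illustrative (hypothetical) test showing how these fixtures are meant to be
# consumed; the class below is an example, not part of freezer-api itself.
class ExampleFixtureUsageTest(FreezerBaseTestCase):
    def test_fake_job_copies_are_independent(self):
        job = get_fake_job_0()
        job['job_schedule']['status'] = 'running'
        # get_fake_job_0() returns a deepcopy, so the shared template
        # dictionary stays untouched for other tests.
        self.assertEqual('stop', fake_job_0['job_schedule']['status'])
        self.assertEqual('running', job['job_schedule']['status'])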
|
|
import nibabel as nib
import os.path as op
import os
import numpy as np
import logging
import pimms
from AFQ.tasks.decorators import as_file
from AFQ.tasks.utils import get_fname, with_name
import AFQ.data.fetch as afd
from AFQ.data.s3bids import write_json
import AFQ.utils.volume as auv
from AFQ.definitions.mapping import SynMap
from AFQ.definitions.utils import Definition
from dipy.io.streamline import load_tractogram
from dipy.io.stateful_tractogram import Space
logger = logging.getLogger('AFQ.api.mapping')
@pimms.calc("b0_warped_file")
@as_file('_b0_in_MNI.nii.gz')
def export_registered_b0(subses_dict, data_imap, mapping):
"""
full path to a nifti file containing
b0 transformed to template space
"""
mean_b0 = nib.load(data_imap["b0_file"]).get_fdata()
warped_b0 = mapping.transform(mean_b0)
warped_b0 = nib.Nifti1Image(warped_b0, data_imap["reg_template"].affine)
return warped_b0, dict(b0InSubject=data_imap["b0_file"])
@pimms.calc("template_xform_file")
@as_file('_template_xform.nii.gz')
def template_xform(subses_dict, dwi_affine, mapping, data_imap):
"""
full path to a nifti file containing
registration template transformed to subject space
"""
template_xform = mapping.transform_inverse(
data_imap["reg_template"].get_fdata())
template_xform = nib.Nifti1Image(template_xform, dwi_affine)
return template_xform, dict()
@pimms.calc("rois_file")
def export_rois(subses_dict, data_imap, mapping, dwi_affine):
"""
dictionary of full paths to Nifti1Image files of ROIs
transformed to subject space
"""
bundle_dict = data_imap["bundle_dict"]
rois_dir = op.join(subses_dict['results_dir'], 'ROIs')
os.makedirs(rois_dir, exist_ok=True)
roi_files = {}
for bundle in bundle_dict:
roi_files[bundle] = []
for roi_type in ['include', 'exclude']:
if roi_type in bundle_dict[bundle]:
for ii, roi in enumerate(bundle_dict[bundle][roi_type]):
fname = op.split(
get_fname(
subses_dict,
f'_desc-ROI-{bundle}-{ii + 1}-{roi_type}.nii.gz'))
fname = op.join(rois_dir, fname[1])
if not op.exists(fname):
if "space" not in bundle_dict[bundle]\
or bundle_dict[bundle][
"space"] == "template":
warped_roi = auv.transform_inverse_roi(
roi,
mapping,
bundle_name=bundle)
else:
warped_roi = roi
# Cast to float32,
# so that it can be read in by MI-Brain:
logger.info(f"Saving {fname}")
nib.save(
nib.Nifti1Image(
warped_roi.astype(np.float32),
dwi_affine), fname)
meta = dict()
meta_fname = fname.split('.')[0] + '.json'
write_json(meta_fname, meta)
roi_files[bundle].append(fname)
return {'rois_file': roi_files}
@pimms.calc("mapping")
def mapping(subses_dict, reg_subject, data_imap, bids_info,
mapping_definition=None):
"""
mapping from subject to template space.
Parameters
----------
mapping_definition : instance of `AFQ.definitions.mapping`, optional
This defines how to either create a mapping from
each subject space to template space or load a mapping from
another software. If creating a map, will register reg_subject and
reg_template.
If None, use SynMap()
Default: None
"""
reg_template = data_imap["reg_template"]
if mapping_definition is None:
mapping_definition = SynMap()
if not isinstance(mapping_definition, Definition):
raise TypeError(
"mapping must be a mapping defined"
+ " in `AFQ.definitions.mapping`")
if bids_info is not None:
mapping_definition.find_path(
bids_info["bids_layout"],
subses_dict["dwi_file"],
bids_info["subject"],
bids_info["session"])
return mapping_definition.get_for_subses(
subses_dict, bids_info, reg_subject, reg_template)
@pimms.calc("mapping")
def sls_mapping(subses_dict, reg_subject, data_imap, bids_info,
tractography_imap, mapping_definition=None):
"""
mapping from subject to template space.
Parameters
----------
mapping_definition : instance of `AFQ.definitions.mapping`, optional
This defines how to either create a mapping from
each subject space to template space or load a mapping from
another software. If creating a map, will register reg_subject and
reg_template.
If None, use SynMap()
Default: None
"""
reg_template = data_imap["reg_template"]
if mapping_definition is None:
mapping_definition = SynMap()
if not isinstance(mapping_definition, Definition):
raise TypeError(
"mapping must be a mapping defined"
+ " in `AFQ.definitions.mapping`")
if bids_info is not None:
mapping_definition.find_path(
bids_info["bids_layout"],
subses_dict["dwi_file"],
bids_info["subject"],
bids_info["session"])
streamlines_file = tractography_imap["streamlines_file"]
tg = load_tractogram(
streamlines_file, reg_subject,
Space.VOX, bbox_valid_check=False)
tg.to_rasmm()
atlas_fname = op.join(
afd.afq_home,
'hcp_atlas_16_bundles',
'Atlas_in_MNI_Space_16_bundles',
'whole_brain',
'whole_brain_MNI.trk')
if not op.exists(atlas_fname):
afd.fetch_hcp_atlas_16_bundles()
hcp_atlas = load_tractogram(
atlas_fname,
'same', bbox_valid_check=False)
return mapping_definition.get_for_subses(
subses_dict, bids_info, reg_subject, reg_template,
subject_sls=tg.streamlines,
template_sls=hcp_atlas.streamlines)
@pimms.calc("reg_subject")
def get_reg_subject(data_imap, bids_info, subses_dict, dwi_affine,
reg_subject_spec="power_map"):
"""
Nifti1Image which represents this subject
when registering the subject to the template
Parameters
----------
reg_subject_spec : str, instance of `AFQ.definitions.scalar`, optional # noqa
The source image data to be registered.
Can either be a Nifti1Image, a scalar definition, or str.
if "b0", "dti_fa_subject", "subject_sls", or "power_map,"
image data will be loaded automatically.
If "subject_sls" is used, slr registration will be used
and reg_template should be "hcp_atlas".
Default: "power_map"
"""
reg_template = data_imap["reg_template"]
if not isinstance(reg_subject_spec, str)\
and not isinstance(reg_subject_spec, nib.Nifti1Image)\
and not isinstance(reg_subject_spec, Definition):
raise TypeError(
"reg_subject must be a str, Definition, or Nifti1Image")
filename_dict = {
"b0": data_imap["b0_file"],
"power_map": data_imap["pmap_file"],
"dti_fa_subject": data_imap["dti_fa_file"],
"subject_sls": data_imap["b0_file"],
}
bm = nib.load(data_imap["brain_mask_file"])
if bids_info is not None and isinstance(reg_subject_spec, Definition):
reg_subject_spec.find_path(
bids_info["bids_layout"],
subses_dict["dwi_file"],
bids_info["subject"],
bids_info["session"])
reg_subject_spec = reg_subject_spec.get_data(
subses_dict, bids_info, dwi_affine,
reg_template, None)
else:
if reg_subject_spec in filename_dict:
reg_subject_spec = filename_dict[reg_subject_spec]
img = nib.load(reg_subject_spec)
bm = bm.get_fdata().astype(bool)
masked_data = img.get_fdata()
masked_data[~bm] = 0
img = nib.Nifti1Image(masked_data, img.affine)
return img
def get_mapping_plan(kwargs, use_sls=False):
mapping_tasks = with_name([
export_registered_b0, template_xform, export_rois, mapping,
get_reg_subject])
bids_info = kwargs.get("bids_info", None)
# add custom scalars
for scalar in kwargs["scalars"]:
if isinstance(scalar, Definition):
if bids_info is None:
scalar.find_path(
None,
kwargs["subses_dict"]["dwi_file"],
None,
None
)
else:
scalar.find_path(
bids_info["bids_layout"],
kwargs["subses_dict"]["dwi_file"],
bids_info["subject"],
bids_info["session"]
)
mapping_tasks[f"{scalar.name}_file_res"] =\
pimms.calc(f"{scalar.name}_file")(scalar.get_for_subses())
if use_sls:
mapping_tasks["mapping_res"] = sls_mapping
return pimms.plan(**mapping_tasks)
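# Illustrative sketch of how the plan above is typically consumed (the exact
# contents of `kwargs` are assembled elsewhere in the AFQ API, so the names
# here are a hypothetical outline rather than runnable code):
#
#     mapping_plan = get_mapping_plan(kwargs)
#     imap = mapping_plan(**kwargs)            # lazy pimms immutable map
#     subject_to_template = imap["mapping"]    # triggers the `mapping` calc
#     warped_b0_path = imap["b0_warped_file"]  # writes *_b0_in_MNI.nii.gz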
|
|
from collections import defaultdict
from contextlib import contextmanager
import windows
import windows.winobject.exception as winexception
from windows import winproxy
from windows.dbgprint import dbgprint
from windows.generated_def import windef
from windows.generated_def.winstructs import *
from .breakpoints import *
class FakeDebuggerCurrentThread(object):
"""A pseudo thread representing the current thread at exception time"""
def __init__(self, dbg):
self.dbg = dbg
@property
def tid(self):
return windows.current_thread.tid
@property
def context(self):
"""!!! This context in-place modification will be effective without set_context"""
return self.dbg.get_exception_context()
def set_context(self, context):
        # The context returned by 'self.context' is already modified in place,
        # so the resumed context is updated without any extra work here.
pass
class LocalDebugger(object):
"""A debugger interface around :func:`AddVectoredExceptionHandler`.
Handle:
* Standard BP (int3)
* Hardware-Exec BP (DrX)
"""
def __init__(self):
self.breakpoints = {}
self._memory_save = {}
self._reput_breakpoint = {}
self._hxbp_breakpoint = defaultdict(dict)
self.callback_vectored = winexception.VectoredException(self.callback)
winproxy.AddVectoredExceptionHandler(0, self.callback_vectored)
self.setup_hxbp_callback_vectored = winexception.VectoredException(self.setup_hxbp_callback)
self.hxbp_info = None
self.code = windows.native_exec.create_function(b"\xcc\xc3", [PVOID])
self.veh_depth = 0
self.current_exception = None
self.exceptions_stack = [None]
self.current_process = windows.current_process
self.current_thread = FakeDebuggerCurrentThread(self)
@contextmanager
def NewCurrentException(self, exc):
try:
self.exceptions_stack.append(exc)
self.current_exception = exc
self.veh_depth += 1
yield exc
finally:
self.exceptions_stack.pop()
self.current_exception = self.exceptions_stack[-1]
self.veh_depth -= 1
def get_exception_code(self):
"""Return ExceptionCode of current exception"""
return self.current_exception[0].ExceptionRecord[0].ExceptionCode
def get_exception_context(self):
"""Return context of current exception"""
return self.current_exception[0].ContextRecord[0]
def single_step(self):
"""Make the current thread to single step"""
self.get_exception_context().EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
def _pass_breakpoint(self, addr, single_step):
with windows.utils.VirtualProtected(addr, 1, PAGE_EXECUTE_READWRITE):
windows.current_process.write_memory(addr, self._memory_save[addr])
self._reput_breakpoint[windows.current_thread.tid] = self.breakpoints[addr], single_step
return self.single_step()
def _local_resolve(self, addr):
if not isinstance(addr, basestring):
return addr
dll, api = addr.split("!")
dll = dll.lower()
modules = {m.name[:-len(".dll")] if m.name.endswith(".dll") else m.name : m for m in windows.current_process.peb.modules}
mod = None
if dll in modules:
mod = [modules[dll]]
if not mod:
return None
        # TODO: optimization: exports are the same for the whole system (32 vs 64 bits),
        # so we would not need to reparse them each time.
# Try to interpret api as an int
try:
api_int = int(api, 0)
return mod[0].baseaddr + api_int
except ValueError:
pass
exports = mod[0].pe.exports
if api not in exports:
dbgprint("Error resolving <{0}> in local process".format(addr, target), "DBG")
raise ValueError("Unknown API <{0}> in DLL {1}".format(api, dll))
return exports[api]
def callback(self, exc):
with self.NewCurrentException(exc):
return self.handle_exception(exc)
def handle_exception(self, exc):
exp_code = self.get_exception_code()
context = self.get_exception_context()
exp_addr = context.pc
if exp_code == EXCEPTION_BREAKPOINT and exp_addr in self.breakpoints:
res = self.breakpoints[exp_addr].trigger(self, exc)
single_step = self.get_exception_context().EEFlags.TF # single step activated by breakpoint
if exp_addr in self.breakpoints: # Breakpoint deleted itself ?
return self._pass_breakpoint(exp_addr, single_step)
return EXCEPTION_CONTINUE_EXECUTION
if exp_code == EXCEPTION_SINGLE_STEP and windows.current_thread.tid in self._reput_breakpoint:
bp, single_step = self._reput_breakpoint[windows.current_thread.tid]
self._memory_save[bp._addr] = windows.current_process.read_memory(bp._addr, 1)
with windows.utils.VirtualProtected(bp._addr, 1, PAGE_EXECUTE_READWRITE):
windows.current_process.write_memory(bp._addr, b"\xcc")
del self._reput_breakpoint[windows.current_thread.tid]
if single_step:
return self.on_exception(exc)
return windef.EXCEPTION_CONTINUE_EXECUTION
elif exp_code == EXCEPTION_SINGLE_STEP and exp_addr in self._hxbp_breakpoint[windows.current_thread.tid]:
res = self._hxbp_breakpoint[windows.current_thread.tid][exp_addr].trigger(self, exc)
context.EEFlags.RF = 1
return EXCEPTION_CONTINUE_EXECUTION
return self.on_exception(exc)
def on_exception(self, exc):
"""Called on exception"""
if self.get_exception_code() not in winexception.exception_name_by_value:
return windef.EXCEPTION_CONTINUE_SEARCH
return windef.EXCEPTION_CONTINUE_EXECUTION
def del_bp(self, bp, targets=None):
"""Delete a breakpoint"""
# TODO: check targets..
if bp.type == STANDARD_BP:
with windows.utils.VirtualProtected(bp.addr, 1, PAGE_EXECUTE_READWRITE):
windows.current_process.write_memory(bp.addr, self._memory_save[bp.addr])
del self._memory_save[bp.addr]
del self.breakpoints[bp.addr]
return
if bp.type == HARDWARE_EXEC_BP:
threads_by_tid = {t.tid: t for t in windows.current_process.threads}
for tid in self._hxbp_breakpoint:
if bp.addr in self._hxbp_breakpoint[tid] and self._hxbp_breakpoint[tid][bp.addr] == bp:
if tid == windows.current_thread.tid:
self.remove_hxbp_self_thread(bp.addr)
else:
self.remove_hxbp_other_thread(bp.addr, threads_by_tid[tid])
del self._hxbp_breakpoint[tid][bp.addr]
return
raise NotImplementedError("Unknow BP type {0}".format(bp.type))
def add_bp(self, bp, target=None):
"""Add a breakpoint, bp is a "class:`Breakpoint`
If the ``bp`` type is ``STANDARD_BP``, target must be None.
If the ``bp`` type is ``HARDWARE_EXEC_BP``, target can be None (all threads), or some threads of the process
"""
if bp.type == HARDWARE_EXEC_BP:
return self.add_bp_hxbp(bp, target)
if bp.type != STANDARD_BP:
raise NotImplementedError("Unknow BP type {0}".format(bp.type))
if target not in [None, windows.current_process]:
raise ValueError("LocalDebugger: STANDARD_BP doest not support targets {0}".format(targets))
addr = self._local_resolve(bp.addr)
bp._addr = addr
self.breakpoints[addr] = bp
self._memory_save[addr] = windows.current_process.read_memory(addr, 1)
with windows.utils.VirtualProtected(addr, 1, PAGE_EXECUTE_READWRITE):
windows.current_process.write_memory(addr, b"\xcc")
return
def add_bp_hxbp(self, bp, targets=None):
if bp.type != HARDWARE_EXEC_BP:
raise NotImplementedError("Add non standard-BP in LocalDebugger")
if targets is None:
targets = windows.current_process.threads
for thread in targets:
if thread.owner.pid != windows.current_process.pid:
raise ValueError("Cannot add HXBP to target in remote process {0}".format(thread))
if thread.tid == windows.current_thread.tid:
self.setup_hxbp_self_thread(bp.addr)
else:
self.setup_hxbp_other_thread(bp.addr, thread)
self._hxbp_breakpoint[thread.tid][bp.addr] = bp
def setup_hxbp_callback(self, exc):
with self.NewCurrentException(exc):
exp_code = self.get_exception_code()
if exp_code != windef.EXCEPTION_BREAKPOINT:
return windef.EXCEPTION_CONTINUE_SEARCH
context = self.get_exception_context()
exp_addr = context.pc
hxbp_used = self.setup_hxbp_in_context(context, self.data)
windows.current_process.write_memory(exp_addr, b"\x90")
# Raising from inside the VEH is a bad idea,
# so pass the information back to the caller via the context instead.
if hxbp_used is not None:
self.get_exception_context().func_result = exp_addr
else:
self.get_exception_context().func_result = 0
return windef.EXCEPTION_CONTINUE_EXECUTION
def remove_hxbp_callback(self, exc):
with self.NewCurrentException(exc):
exp_code = self.get_exception_code()
context = self.get_exception_context()
exp_addr = context.pc
hxbp_used = self.remove_hxbp_in_context(context, self.data)
windows.current_process.write_memory(exp_addr, b"\x90")
# Raising from inside the VEH is a bad idea,
# so pass the information back to the caller via the context instead.
if hxbp_used is not None:
self.get_exception_context().Eax = exp_addr
else:
self.get_exception_context().Eax = 0
return windef.EXCEPTION_CONTINUE_EXECUTION
def setup_hxbp_in_context(self, context, addr):
for i in range(4):
is_used = getattr(context.EDr7, "L" + str(i))
empty_drx = str(i)
if not is_used:
context.EDr7.GE = 1
context.EDr7.LE = 1
setattr(context.EDr7, "L" + empty_drx, 1)
setattr(context, "Dr" + empty_drx, addr)
return i
return None
def remove_hxbp_in_context(self, context, addr):
for i in range(4):
target_drx = str(i)
is_used = getattr(context.EDr7, "L" + str(i))
draddr = getattr(context, "Dr" + target_drx)
if is_used and draddr == addr:
setattr(context.EDr7, "L" + target_drx, 0)
setattr(context, "Dr" + target_drx, 0)
return i
return None
def setup_hxbp_self_thread(self, addr):
if self.current_exception is not None:
x = self.setup_hxbp_in_context(self.get_exception_context(), addr)
if x is None:
raise ValueError("Could not setup HXBP")
return
self.data = addr
with winexception.VectoredExceptionHandler(1, self.setup_hxbp_callback):
x = self.code()
if x is None:
raise ValueError("Could not setup HXBP")
windows.current_process.write_memory(x, b"\xcc")
return
def setup_hxbp_other_thread(self, addr, thread):
thread.suspend()
ctx = thread.context
x = self.setup_hxbp_in_context(ctx, addr)
if x is None:
raise ValueError("Could not setup HXBP in {0}".format(thread))
thread.set_context(ctx)
thread.resume()
def remove_hxbp_self_thread(self, addr):
if self.current_exception is not None:
x = self.remove_hxbp_in_context(self.get_exception_context(), addr)
if x is None:
raise ValueError("Could not setup HXBP")
return
self.data = addr
with winexception.VectoredExceptionHandler(1, self.remove_hxbp_callback):
x = self.code()
if x is None:
raise ValueError("Could not remove HXBP")
windows.current_process.write_memory(x, b"\xcc")
return
def remove_hxbp_other_thread(self, addr, thread):
thread.suspend()
ctx = thread.context
x = self.remove_hxbp_in_context(ctx, addr)
if x is None:
raise ValueError("Could not setup HXBP in {0}".format(thread))
thread.set_context(ctx)
thread.resume()
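# Usage sketch (not executed). Assumptions: the surrounding module exposes
# LocalDebugger and STANDARD_BP as used above, and a breakpoint is any object
# with ``addr``, ``type`` and ``trigger(dbg, exc)`` -- the concrete Breakpoint
# class lives elsewhere. "kernel32!Sleep" is a hypothetical target string that
# _local_resolve() turns into an address.
def _example_local_debugger_usage():
    class PrintBP(object):
        type = STANDARD_BP
        def __init__(self, addr):
            self.addr = addr
        def trigger(self, dbg, exc):
            # Called from handle_exception() when the int3 written by add_bp() fires.
            print("breakpoint hit at {0:#x}".format(dbg.get_exception_context().pc))
    dbg = LocalDebugger()
    dbg.add_bp(PrintBP("kernel32!Sleep"))  # writes 0xcc at the resolved address
    return dbg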
|
|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -llib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id$"
import os, sys, re
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
import platform
if platform.machine() == 'i386':
if platform.architecture()[0] == '32bit':
arch = 'i386'
else:
arch = 'x86_64'
else:
# just a guess
arch = platform.machine()
executables['ranlib'] = ["ranlib"]
executables['linker_so'] += ['-undefined', 'dynamic_lookup']
for k, v in executables.iteritems():
if v and v[0] == 'cc':
v += ['-arch', arch]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source,
output_file=None, macros=None, include_dirs=None,
extra_preargs=None, extra_postargs=None):
ignore, macros, include_dirs = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
raise CompileError, msg
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so,
cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
# Not many Unices require ranlib anymore -- SunOS 4.x is, I
# think, the only major Unix that does.  Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def _is_gcc(self, compiler_name):
return "gcc" in compiler_name or "g++" in compiler_name
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC has to be told to pass the -R option through
# to the linker, whereas other compilers just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:5] == "hp-ux":
if self._is_gcc(compiler):
return ["-Wl,+s", "-L" + dir]
return ["+s", "-L" + dir]
elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
return ["-rpath", dir]
elif self._is_gcc(compiler):
return "-Wl,-R" + dir
else:
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
# On OSX users can specify an alternate SDK using
# '-isysroot', calculate the SDK root if it is specified
# (and use it further on)
cflags = sysconfig.get_config_var('CFLAGS') or ''
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
if sys.platform == 'darwin' and (
dir.startswith('/System/') or (
dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
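# Usage sketch (not executed): on most Unix-like platforms new_compiler()
# hands back a UnixCCompiler, and customize_compiler() fills in CC, CFLAGS,
# LDSHARED, etc. from Python's build configuration. The file names below are
# assumptions for illustration only.
def _example_unixccompiler_usage():
    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler
    cc = new_compiler()                # typically a UnixCCompiler instance
    customize_compiler(cc)             # pull compiler settings from sysconfig
    objs = cc.compile(['hello.c'], output_dir='build')     # hello.c -> build/hello.o
    cc.link_executable(objs, 'hello', output_dir='build')  # build/hello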
|
|
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
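# Optional sketch (not executed; see the note on local optima in the module
# docstring): rerun k-means with several independent random initializations by
# raising n_init -- scikit-learn keeps the run with the lowest inertia.
def _example_multiple_inits(X, true_k):
    km_multi = KMeans(n_clusters=true_k, init='k-means++', max_iter=100,
                      n_init=10)
    return km_multi.fit(X)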
|
|
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
import numpy as np
import scipy as sp
import math
import time
from cops_and_robots.fusion.gaussian_mixture import (GaussianMixture,
fleming_prior,
uniform_prior,
)
from cops_and_robots.fusion.grid import Grid
from cops_and_robots.fusion.filter import Filter
from cops_and_robots.fusion.variational_bayes import VariationalBayes
from cops_and_robots.fusion.softmax import (geometric_model,
neighbourhood_model,
product_model,
)
class GaussSumFilter(Filter):
"""docstring for GaussSumFilter
Fusion methods describe how to perform data fusion, with sequential updating
at each time step, full batch doing a complete batch update of all sensor
information from the initial prior, and windowed batch fusing all sensor
information provided within a specific window.
Compression methods describe how a batch (full or windowed) fusion is
performed. Product is exact fusion, neighbourhood uses a reduced number
of neighbour classes near the joint measurement class, and geometric uses
the minimum number of classes next to the joint measurement class.
"""
fusion_methods = ['sequential', 'full batch', 'windowed batch']
compression_methods = ['product', 'neighbourhood', 'geometric']
def __init__(self,
fusion_method='sequential',
compression_method='geometric',
window=1,
*args,
**kwargs
):
super(GaussSumFilter, self).__init__(probability_type='gaussian_mixture',
*args, **kwargs)
self.fusion_method = fusion_method
self.compression_method = compression_method
self.window = window
# Set up the VB fusion parameters
self.vb = VariationalBayes()
def _human_update(self, human_sensor):
# No update if human sensor doesn't have a statement
if not self._verify_human_update(human_sensor):
return
# Pause Rosbag process (if using one)
if self.rosbag_process is not None:
logging.info('Stopped rosbag to do fusion...')
self.rosbag_process.stdin.write(' ') # stop
self.rosbag_process.stdin.flush()
time.sleep(0.5)
if self.fusion_method == 'sequential':
self.fusion(human_sensor)
elif self.fusion_method in ['full batch', 'windowed batch']:
self.batch_fusion(human_sensor)
# Resume Rosbag process (if using one)
if self.rosbag_process is not None:
self.rosbag_process.stdin.write(' ') # start rosbag
self.rosbag_process.stdin.flush()
logging.info('Restarted rosbag!')
def batch_fusion(self, human_sensor):
#<>TODO: update from new human_sensor class!
measurement = human_sensor.get_statement_likelihood(discretized=False)
self.measurements.append(measurement)
# self.measurements.append(measurement)
if len(self.measurements) >= self.window:
# Create combined measurement labels
measurement_labels = []
for measurement in self.measurements:
measurement_labels.append(measurement['relation'])
measurement_label = " + ".join(measurement_labels)
# Concatenate softmax models
models = []
for measurement in self.measurements:
grounding = measurement['grounding']
relation_class = measurement['relation class']
model = grounding.relations.binary_models[relation_class]
models.append(model)
# Compress the likelihood
if self.compression_method == 'product':
likelihood = product_model(models)
elif self.compression_method == 'neighbourhood':
likelihood = neighbourhood_model(models, measurement_labels)
elif self.compression_method == 'geometric':
likelihood = geometric_model(models, measurement_labels)
# Perform fusion
self.fusion(likelihood, measurement_label, human_sensor)
# Discard measurements for windowed, increase window size for full
if self.fusion_method == 'windowed batch':
self.measurements = []
elif self.fusion_method == 'full batch':
self.window += self.window
def fusion(self, human_sensor):
likelihood, label = human_sensor.get_statement_likelihood(discretized=False)
prior = self.probability.copy()
# Perform fusion
if type(likelihood) is list:
self.multi_likelihood_fusion(likelihood, label, human_sensor)
else:
self.probability.measurement_update(likelihood, label)
# Include human false alarm rate
posterior_weight = 1 - human_sensor.false_alarm_prob
self.probability.combine_gms(prior, posterior_weight)
def multi_likelihood_fusion(self, likelihoods, measurement_label, human_sensor):
if self.fusion_method == 'full batch':
prior = self.original_prior
else:
prior = self.probability
# <>TODO: clean up this section!
mixtures = []
raw_weights = []
for u, mixand_weight in enumerate(prior.weights):
prior_mixand = GaussianMixture(1, prior.means[u], prior.covariances[u])
for i, likelihood in enumerate(likelihoods):
mu, sigma, beta = self.vb.update(measurement=measurement_label,
likelihood=likelihood,
prior=prior_mixand,
get_raw_beta=True,
)
new_mixture = GaussianMixture(beta, mu, sigma)
# Weight the posterior by the human's false alarm rate
alpha = human_sensor.false_alarm_prob / 2
prior_mixand.combine_gms([new_mixture], alpha)
mixtures.append(prior_mixand)
raw_weights.append(beta * mixand_weight)
# Renormalize raw weights
raw_weights = np.array(raw_weights).reshape(-1)
raw_weights /= raw_weights.sum()
try:
mixtures[0].combine_gms(mixtures[1:], raw_weights=raw_weights)
posterior = mixtures[0]
except IndexError:
logging.error('ERROR! Cannot combine GMs.')
posterior = prior
self.probability = posterior
def robber_detected(self, robber_pose):
"""Update the filter for a detected robber.
"""
# <>TODO: Figure out better strategy when robber detected
self.probability = GaussianMixture(1, robber_pose[0:2], 0.01 * np.eye(2))
self.finished = True
# def truncate_gaussians(self):
# # To start, just use map bounds
# bounds = self.feasible_layer.bounds
# logging.debug('Constraints: {}'.format(bounds))
# weights = self.probability.weights
# means = self.probability.means
# covariances = self.probability.covariances
# # V = np.array([[bounds[0],bounds[2]],[bounds[1],bounds[3]]])
# # Bcon, upper_bound = vert2con(V.T)
# Bcon = np.array([[1/bounds[0], 0, 0, 0],
# [1/bounds[2], 0, 0, 0],
# [0, 1/bounds[1], 0, 0],
# [0, 1/bounds[3], 0, 0],
# # [0, 0, 1, 1],
# # [0, 0, -1, -1,],
# ])
# upper_bound = np.array([[1],
# [1],
# [1],
# [1],
# # [1],
# # [1],
# ])
# lower_bound = -np.inf*np.ones((4, 1))
# new_means = []
# new_covariances = []
# for i, mean in enumerate(means):
# covariance = covariances[i]
# new_mean, new_covariance, wt = self.iterative_gaussian_trunc_update(mean,
# covariance, Bcon, lower_bound, upper_bound)
# new_means.append(new_mean)
# new_covariances.append(new_covariance)
# self.probability = GaussianMixture(weights=weights, means=new_means,
# covariances=new_covariances)
# def vert2con(V):
# # will assume convhull
# pass
# def iterative_gaussian_trunc_update(self, mean, covariance, Bcon,
# lower_bound, upper_bound,
# dosort=False, dosplit=False):
# if dosplit:
# pass
# if dosort:
# pass
# # probreductionmeasure = np.zeros(upperbound.shape)
# # for ii in range(Bcon):
# # probreductionmeasure[ii] = (upperbound[ii]-Bcon[ii]*mean) / \
# # np.sqrt(Bcon[ii] .dot covariance .dot Bcon[ii].T)
# else:
# Bmat = Bcon
# ubound = upper_bound
# lbound = lower_bound
# # Initialize mean and covariance matrix to be updated
# muTilde = mean
# SigmaTilde = covariance
# # print SigmaTilde
# # do iterative constraint updates
# for ii in range(Bmat.shape[0]):
# phi_ii = Bmat[ii].T
# # Eigenvalue decomp
# Tii, Wii = np.linalg.eig(SigmaTilde)
# # Take real parts
# Tii = np.real(Tii)
# Wii = np.real(Wii)
# # Make a diagonal matrix
# Tii = np.diag(Tii)
# # print 'eigenvector', Wii
# # print np.sqrt(Wii)
# # print 'Eigenvalues', Tii.T
# # print phi_ii
# # Find orthonogonal Sii via Gram-Schmidt
# P = np.sqrt(Wii) .dot (Tii.T) .dot (phi_ii)
# P = np.expand_dims(P, axis=0)
# # print 'P', P
# tau_ii = np.sqrt(phi_ii.T .dot (SigmaTilde) .dot (phi_ii))
# Qtilde, Rtilde = np.linalg.qr(P)
# # print 'R', Rtilde
# # print tau_ii
# # print Qtilde
# # Sii = (Rtilde[0][0] / tau_ii) * (Qtilde.T)
# # Compute transformed lower and upper 1D constraint bounds
# # print 'mu', muTilde
# # print 'phi', phi_ii
# # print phi_ii.T .dot (muTilde)
# # print lbound[ii]
# cii = (lbound[ii] - phi_ii.T .dot (muTilde)) / tau_ii
# dii = (ubound[ii] - phi_ii.T .dot (muTilde)) / tau_ii
# print 'cii', cii
# print 'dii', dii
# # compute renormalization stats
# alphaiiden = np.maximum(sp.special.erf(dii/np.sqrt(2)) - sp.special.erf(cii/np.sqrt(2)), np.finfo(float).eps)
# alphaii = np.sqrt(2/np.pi) / alphaiiden
# muii = alphaii * np.exp(-0.5 * cii ** 2) - np.exp(-0.5 * dii ** 2)
# # check for -/+ inf bounds to avoid nans
# if np.isinf(cii).all() and not np.isinf(dii).all():
# sig2ii = alphaii * ( -np.exp(-0.5*dii ** 2) * (dii-2*muii) ) + muii ** 2 + 1
# elif np.isinf(dii).all() and not np.isinf(cii).all():
# sig2ii = alphaii * ( np.exp(-0.5*cii ** 2) * (cii-2*muii) ) + muii ** 2 + 1
# elif np.isinf(dii).all() and np.isinf(cii).all():
# sig2ii = muii ** 2 + 1
# else:
# sig2ii = alphaii * ( np.exp(-0.5*cii ** 2)*(cii-2*muii) - \
# np.exp(-0.5*dii ** 2)*(dii-2*muii) ) + muii ** 2 + 1
# if sig2ii <= 0:
# logging.error('Something''s wrong: sig2ii <=0!')
# # get mean and covariance of transformed state estimate:
# ztilde_ii = np.concatenate((np.expand_dims(muii, axis=0), np.zeros((muTilde.shape[0]-1, 1))), axis=0)
# Ctilde_ii = np.diag(np.concatenate((np.expand_dims(sig2ii, axis=0), np.ones((muTilde.shape[0]-1,1)))));
# # recover updated estimate in original state space for next/final pass
# muTilde = Tii * np.sqrt(Wii) * Sii.T * ztilde_ii + muTilde
# SigmaTilde = Tii * np.sqrt(Wii)*Sii.T * Ctilde_ii * Sii * np.sqrt(Wii) * Tii.T
# print Tii
# print Wii
# print 'Sii', Sii.T
# print Ctilde_ii
# # ensure symmetry:
# SigmaTilde = 0.5 * (SigmaTilde + SigmaTilde.T)
# print SigmaTilde
# muOut = muTilde
# SigmaOut = SigmaTilde
# # compute updated likelihood
# # pass
# wtOut = 1
# return muOut, SigmaOut, wtOut #lkOut
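# Sketch (not executed; same constructor kwargs as test_fusion below): a
# windowed-batch filter fuses the last ``window`` human statements at once,
# compressed with the neighbourhood model described in the class docstring.
# 'Roy' is the hypothetical target name also used in test_fusion.
def _example_windowed_batch_filter():
    return GaussSumFilter(target_name='Roy',
                          fusion_method='windowed batch',
                          compression_method='neighbourhood',
                          window=3,
                          dynamic_model=False)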
def test_fusion(fusion_method='sequential', speed_test=True):
from cops_and_robots.map_tools.map import Map
from cops_and_robots.map_tools.probability_layer import ProbabilityLayer
from cops_and_robots.human_tools.human import Human
import matplotlib.pyplot as plt
map_ = Map()
human_sensor = Human(map_=map_)
kwargs = {'target_name': 'Roy',
'fusion_method': fusion_method,
'dynamic_model': False,
}
product_filter = GaussSumFilter(compression_method='product', **kwargs)
neighbourhood_filter = GaussSumFilter(compression_method='neighbourhood', **kwargs)
geometric_filter = GaussSumFilter(compression_method='geometric', **kwargs)
# Plot initial state
fig = plt.figure()
ax = fig.add_subplot(111)
probability_layer = ProbabilityLayer(geometric_filter, bounds=map_.bounds,
grid_size=0.1, fig=fig, ax=ax)
probability_layer.plot()
# print probability_layer.filter.probability.prob
plt.show()
# Plot sensor updates
human_utterances = ['I know Roy is inside the hallway.',
'I know Roy is near the fern.',
'I know Roy is not inside the kitchen.',
]
for utterance in human_utterances:
human_sensor.utterance = utterance
human_sensor.new_update = True
geometric_filter.update(human_sensor=human_sensor)
# fig = plt.Figure()
probability_layer = ProbabilityLayer(geometric_filter, bounds=map_.bounds,
grid_size=0.1, fig=fig, ax=ax)
probability_layer.plot()
plt.show()
if __name__ == '__main__':
test_fusion()
|
|
"""
PSET-6
Your friend, who is also taking 6.00.1x, is really excited about the program she
wrote for Problem 1 of this problem set. She sends you emails, but they're all
encrypted with the Caesar cipher!
If you know which shift key she is using, then decrypting her message is an easy
task. If the string message is the encrypted message and k is the shift key she
is using, then calling applyShift(message, 26-k) returns her original message.
Do you see why?
The problem is, you don't know which shift key she is using. The good news is,
you know your friend only speaks and writes English words. So if you can write
a program to find the decoding that produces the maximum number of real English
words, you can probably find the right decoding (there's always a chance that
the shift may not be unique. Accounting for this would use statistical methods
that we won't require of you.)
Implement findBestShift(). This function takes a wordList and a sample of
encrypted text and attempts to find the shift that encoded the text. A simple
indication of whether or not the correct shift has been found is if most of
the words obtained after a shift are valid words. Note that this only means that
most of the words obtained are actual words. It is possible to have a message
that can be decoded by two separate shifts into different sets of words. While
there are various strategies for deciding between ambiguous decryptions, for
this problem we are only looking for a simple solution.
To assist you in solving this problem, we have provided a helper function,
isWord(wordList, word). This simply determines if word is a valid word according
to the wordList. This function ignores capitalization and punctuation.
"""
import string
import random
import operator
# helper classes code
# --------------------------------
class CharAlphaASCII(object):
ALPHA_LEN = 26
ASCII_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
ASCII_CHARS_IND = {'A': 0, 'C': 2, 'B': 1, 'E': 4, 'D': 3, 'G': 6, 'F': 5, \
'I': 8, 'H': 7, 'K': 10, 'J': 9, 'M': 12, 'L': 11, \
'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18, 'R': 17, \
'U': 20, 'T': 19, 'W': 22, 'V': 21, 'Y': 24, 'X': 23, \
'Z': 25, \
'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29, 'g': 32, \
'f': 31, 'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38, \
'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44, \
'r': 43, 'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, \
'x': 49, 'z': 51}
def __init__(self, char):
if len(char) > 1:
raise ValueError("CharAlphaASCII can't be more than 1 of length")
if not char.isalpha():
raise ValueError("CharAlphaASCII only accepts ASCII alpha chars")
self.char = char[0]
def __add__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.add) )
def __sub__(self, num):
if type(num) != int:
raise TypeError
return CharAlphaASCII( self.operation(num, operator.sub) )
def __str__(self):
return self.char
def __lt__(self, char):
return self.char < char
def __le__(self, char):
return self.char <= char
def __eq__(self, char):
return self.char == char
def __gt__(self, char):
return self.char > char
def __ge__(self, char):
return self.char >= char
def __len__(self):
return len(self.char)
def operation(self, num, op):
if type(num) != int:
raise TypeError
index = self.ASCII_CHARS_IND[self.char]
if index < self.ALPHA_LEN:
newIndex = op(index, num) % self.ALPHA_LEN
else:
newIndex = op(index, num) % self.ALPHA_LEN + self.ALPHA_LEN
return self.ASCII_CHARS[newIndex]
def ToUnicode(self):
return ord(self.char)
class Cstr(str, object):
def __init__(self, s):
self.s = s
def __add__(self, s):
return Cstr(self.s + str(s))
def __str__(self):
return self.s
# --------------------------------
# END of helper classes code
def applyCoder_CSTR(text, shift):
"""
Applies a Caesar shift to the text. Returns the encoded text.
text: string
shift: int, number of positions each alphabetic character is shifted
returns: the shifted text (non-alphabetic characters are left unchanged)
"""
cs = Cstr("")
for char in text:
if char.isalpha():
C = CharAlphaASCII(char)
C += shift
cs += C
else:
cs += char
return str(cs)
def findBestShift(wordList, text):
"""
Finds a shift key that can decrypt the encoded text.
wordList: list of valid, lowercase words
text: string, the encrypted text
returns: 0 <= int < 26
"""
def linearFind(wordList, words):
if len(words) == 0:
raise ValueError("empty text")
alpha_len = 26
matches = 0
mismatches = 0
for k in range(alpha_len):
if matches > mismatches:
return (alpha_len - (k - 1)) % alpha_len
matches = 0
mismatches = 0
for word in words:
word = strip(word.lower())
enc = applyCoder_CSTR(word, alpha_len - k)
if enc in wordList:
matches += 1
else:
mismatches += 1
return 0
def randTestFind(wordList, words):
if len(words) == 0:
raise ValueError("empty text")
alpha_len = 26
iters = 5
found = False
if iters >= len(words):
iters = len(words) - 1
for k in range(alpha_len):
if found == True:
return (alpha_len - (k - 1)) % alpha_len
"""
Creating history for random indices
to avoid the same word being tested
more than once.
"""
rands = []
for i in range(iters):
rand = random.randint(0, len(words) - 1)
while rand in rands:
rand = random.randint(0, len(words) - 1)
rands.append(rand)
word = strip(words[rand].lower())
enc = applyCoder_CSTR(word, alpha_len - k)
if enc in wordList:
found = True
else:
found = False
return 0
####----findBestShift----####
if text == "":
raise ValueError("empty text")
LINEAR_TO_RAND_SEARCH_THRESHOLD = 15
words = text.split()
if len(words) < LINEAR_TO_RAND_SEARCH_THRESHOLD:
return linearFind(wordList, words)
return randTestFind(wordList, words)
def strip(word, pred = lambda char: char.isalpha()):
newWord = ""
for char in word:
if pred(char):
newWord += char
return newWord
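# Worked example (a sketch, assuming a tiny lowercase word list): encoding with
# shift k and then shifting again by 26 - k amounts to a total shift of 26,
# i.e. 0 mod 26, which is why decoding with the value returned by
# findBestShift() recovers the original message.
def _example_find_best_shift():
    wordList = ["hello", "from", "your", "friend"]          # hypothetical mini word list
    secret = applyCoder_CSTR("hello from your friend", 3)   # encrypt with k = 3
    best = findBestShift(wordList, secret)                  # expected: 26 - 3 = 23
    return applyCoder_CSTR(secret, best)                    # -> "hello from your friend"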
|
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import warnings
from copy import deepcopy
from itertools import combinations, permutations
import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_, assert_equal,
assert_raises, assert_array_equal, assert_warns)
import pywt
from pywt._extensions._swt import swt_axis
# Check that float32 and complex64 are preserved. Other real types get
# converted to float64.
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
np.complex128]
# tolerances used in accuracy comparisons
tol_single = 1e-6
tol_double = 1e-13
####
# 1d multilevel swt tests
####
def test_swt_decomposition():
x = [3, 7, 1, 3, -2, 6, 4, 6]
db1 = pywt.Wavelet('db1')
atol = tol_double
(cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=3)
expected_cA1 = [7.07106781, 5.65685425, 2.82842712, 0.70710678,
2.82842712, 7.07106781, 7.07106781, 6.36396103]
assert_allclose(cA1, expected_cA1, rtol=1e-8, atol=atol)
expected_cD1 = [-2.82842712, 4.24264069, -1.41421356, 3.53553391,
-5.65685425, 1.41421356, -1.41421356, 2.12132034]
assert_allclose(cD1, expected_cD1, rtol=1e-8, atol=atol)
expected_cA2 = [7, 4.5, 4, 5.5, 7, 9.5, 10, 8.5]
assert_allclose(cA2, expected_cA2, rtol=tol_double, atol=atol)
expected_cD2 = [3, 3.5, 0, -4.5, -3, 0.5, 0, 0.5]
assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=atol)
expected_cA3 = [9.89949494, ] * 8
assert_allclose(cA3, expected_cA3, rtol=1e-8, atol=atol)
expected_cD3 = [0.00000000, -3.53553391, -4.24264069, -2.12132034,
0.00000000, 3.53553391, 4.24264069, 2.12132034]
assert_allclose(cD3, expected_cD3, rtol=1e-8, atol=atol)
# level=1, start_level=1 decomposition should match level=2
res = pywt.swt(cA1, db1, level=1, start_level=1)
cA2, cD2 = res[0]
assert_allclose(cA2, expected_cA2, rtol=tol_double, atol=atol)
assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=atol)
coeffs = pywt.swt(x, db1)
assert_(len(coeffs) == 3)
assert_equal(pywt.swt_max_level(len(x)), 3)
def test_swt_max_level():
# odd sized signal will warn about no levels of decomposition possible
assert_warns(UserWarning, pywt.swt_max_level, 11)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_equal(pywt.swt_max_level(11), 0)
# no warnings when >= 1 level of decomposition possible
assert_equal(pywt.swt_max_level(2), 1) # divisible by 2**1
assert_equal(pywt.swt_max_level(4*3), 2) # divisible by 2**2
assert_equal(pywt.swt_max_level(16), 4) # divisible by 2**4
assert_equal(pywt.swt_max_level(16*3), 4) # divisible by 2**4
def test_swt_axis():
x = [3, 7, 1, 3, -2, 6, 4, 6]
db1 = pywt.Wavelet('db1')
(cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=2)
# test cases use 2D arrays based on tiling x along an axis and then
# calling swt along the other axis.
for order in ['C', 'F']:
# test SWT of 2D data along default axis (-1)
x_2d = np.asarray(x).reshape((1, -1))
x_2d = np.concatenate((x_2d, )*5, axis=0)
if order == 'C':
x_2d = np.ascontiguousarray(x_2d)
elif order == 'F':
x_2d = np.asfortranarray(x_2d)
(cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2)
for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
assert_(c.shape == x_2d.shape)
# each row should match the 1D result
for row in cA1_2d:
assert_array_equal(row, cA1)
for row in cA2_2d:
assert_array_equal(row, cA2)
for row in cD1_2d:
assert_array_equal(row, cD1)
for row in cD2_2d:
assert_array_equal(row, cD2)
# test SWT of 2D data along other axis (0)
x_2d = np.asarray(x).reshape((-1, 1))
x_2d = np.concatenate((x_2d, )*5, axis=1)
if order == 'C':
x_2d = np.ascontiguousarray(x_2d)
elif order == 'F':
x_2d = np.asfortranarray(x_2d)
(cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2,
axis=0)
for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
assert_(c.shape == x_2d.shape)
# each column should match the 1D result
for row in cA1_2d.transpose((1, 0)):
assert_array_equal(row, cA1)
for row in cA2_2d.transpose((1, 0)):
assert_array_equal(row, cA2)
for row in cD1_2d.transpose((1, 0)):
assert_array_equal(row, cD1)
for row in cD2_2d.transpose((1, 0)):
assert_array_equal(row, cD2)
# axis too large
assert_raises(ValueError, pywt.swt, x, db1, level=2, axis=5)
def test_swt_iswt_integration():
# This function performs a round-trip swt/iswt transform test on
# all available types of wavelets in PyWavelets - except the
# 'dmey' wavelet. The latter has been excluded because it does not
# produce very precise results. This is likely due to the fact
# that the 'dmey' wavelet is a discrete approximation of a
# continuous wavelet. All wavelets are tested up to 3 levels. The
# test validates neither swt nor iswt as such, but it does ensure
# that they are each other's inverse.
max_level = 3
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet seems to be a bit special - disregard it for now
wavelets.remove('dmey')
for current_wavelet_str in wavelets:
current_wavelet = pywt.Wavelet(current_wavelet_str)
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power + max_level - 1)
X = np.arange(input_length)
for norm in [True, False]:
if norm and not current_wavelet.orthogonal:
# skip non-orthogonal wavelets to avoid warnings when norm=True
continue
for trim_approx in [True, False]:
coeffs = pywt.swt(X, current_wavelet, max_level,
trim_approx=trim_approx, norm=norm)
Y = pywt.iswt(coeffs, current_wavelet, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-7)
def test_swt_dtypes():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
errmsg = "wrong dtype returned for {0} input".format(dt_in)
# swt
x = np.ones(8, dtype=dt_in)
(cA2, cD2), (cA1, cD1) = pywt.swt(x, wavelet, level=2)
assert_(cA2.dtype == cD2.dtype == cA1.dtype == cD1.dtype == dt_out,
"swt: " + errmsg)
# swt2
x = np.ones((8, 8), dtype=dt_in)
cA, (cH, cV, cD) = pywt.swt2(x, wavelet, level=1)[0]
assert_(cA.dtype == cH.dtype == cV.dtype == cD.dtype == dt_out,
"swt2: " + errmsg)
def test_swt_roundtrip_dtypes():
# verify perfect reconstruction for all dtypes
rstate = np.random.RandomState(5)
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
# swt, iswt
x = rstate.standard_normal((8, )).astype(dt_in)
c = pywt.swt(x, wavelet, level=2)
xr = pywt.iswt(c, wavelet)
assert_allclose(x, xr, rtol=1e-6, atol=1e-7)
# swt2, iswt2
x = rstate.standard_normal((8, 8)).astype(dt_in)
c = pywt.swt2(x, wavelet, level=2)
xr = pywt.iswt2(c, wavelet)
assert_allclose(x, xr, rtol=1e-6, atol=1e-7)
def test_swt_default_level_by_axis():
# make sure default number of levels matches the max level along the axis
wav = 'db2'
x = np.ones((2**3, 2**4, 2**5))
for axis in (0, 1, 2):
sdec = pywt.swt(x, wav, level=None, start_level=0, axis=axis)
assert_equal(len(sdec), pywt.swt_max_level(x.shape[axis]))
def test_swt2_ndim_error():
x = np.ones(8)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
assert_raises(ValueError, pywt.swt2, x, 'haar', level=1)
@pytest.mark.slow
def test_swt2_iswt2_integration(wavelets=None):
# This function performs a round-trip swt2/iswt2 transform test on
# all available types of wavelets in PyWavelets - except the
# 'dmey' wavelet. The latter has been excluded because it does not
# produce very precise results. This is likely due to the fact
# that the 'dmey' wavelet is a discrete approximation of a
# continuous wavelet. All wavelets are tested up to 3 levels. The
# test validates neither swt2 nor iswt2 as such, but it does ensure
# that they are each other's inverse.
max_level = 3
if wavelets is None:
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet is a special case - disregard it for now
wavelets.remove('dmey')
for current_wavelet_str in wavelets:
current_wavelet = pywt.Wavelet(current_wavelet_str)
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power + max_level - 1)
X = np.arange(input_length**2).reshape(input_length, input_length)
for norm in [True, False]:
if norm and not current_wavelet.orthogonal:
# skip non-orthogonal wavelets to avoid warnings when norm=True
continue
for trim_approx in [True, False]:
coeffs = pywt.swt2(X, current_wavelet, max_level,
trim_approx=trim_approx, norm=norm)
Y = pywt.iswt2(coeffs, current_wavelet, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-5)
def test_swt2_iswt2_quick():
test_swt2_iswt2_integration(wavelets=['db1', ])
def test_swt2_iswt2_non_square(wavelets=None):
for nrows in [8, 16, 48]:
X = np.arange(nrows*32).reshape(nrows, 32)
current_wavelet = 'db1'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
coeffs = pywt.swt2(X, current_wavelet, level=2)
Y = pywt.iswt2(coeffs, current_wavelet)
assert_allclose(Y, X, rtol=tol_single, atol=tol_single)
def test_swt2_axes():
atol = 1e-14
current_wavelet = pywt.Wavelet('db2')
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power)
X = np.arange(input_length**2).reshape(input_length, input_length)
(cA1, (cH1, cV1, cD1)) = pywt.swt2(X, current_wavelet, level=1)[0]
# opposite order
(cA2, (cH2, cV2, cD2)) = pywt.swt2(X, current_wavelet, level=1,
axes=(1, 0))[0]
assert_allclose(cA1, cA2, atol=atol)
assert_allclose(cH1, cV2, atol=atol)
assert_allclose(cV1, cH2, atol=atol)
assert_allclose(cD1, cD2, atol=atol)
# duplicate axes not allowed
assert_raises(ValueError, pywt.swt2, X, current_wavelet, 1,
axes=(0, 0))
# too few axes
assert_raises(ValueError, pywt.swt2, X, current_wavelet, 1, axes=(0, ))
def test_iswt2_2d_only():
# iswt2 is not currently compatible with data that is not 2D
x_3d = np.ones((4, 4, 4))
c = pywt.swt2(x_3d, 'haar', level=1)
assert_raises(ValueError, pywt.iswt2, c, 'haar')
def test_swtn_axes():
atol = 1e-14
current_wavelet = pywt.Wavelet('db2')
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power)
X = np.arange(input_length**2).reshape(input_length, input_length)
coeffs = pywt.swtn(X, current_wavelet, level=1, axes=None)[0]
# opposite order
coeffs2 = pywt.swtn(X, current_wavelet, level=1, axes=(1, 0))[0]
assert_allclose(coeffs['aa'], coeffs2['aa'], atol=atol)
assert_allclose(coeffs['ad'], coeffs2['da'], atol=atol)
assert_allclose(coeffs['da'], coeffs2['ad'], atol=atol)
assert_allclose(coeffs['dd'], coeffs2['dd'], atol=atol)
# 0-level transform
empty = pywt.swtn(X, current_wavelet, level=0)
assert_equal(empty, [])
# duplicate axes not allowed
assert_raises(ValueError, pywt.swtn, X, current_wavelet, 1, axes=(0, 0))
# data.ndim = 0
assert_raises(ValueError, pywt.swtn, np.asarray([]), current_wavelet, 1)
# start_level too large
assert_raises(ValueError, pywt.swtn, X, current_wavelet,
level=1, start_level=2)
# level < 1 in swt_axis call
assert_raises(ValueError, swt_axis, X, current_wavelet, level=0,
start_level=0)
# odd-sized data not allowed
assert_raises(ValueError, swt_axis, X[:-1, :], current_wavelet, level=0,
start_level=0, axis=0)
@pytest.mark.slow
def test_swtn_iswtn_integration(wavelets=None):
# This function performs a round-trip swtn/iswtn transform for various
# possible combinations of:
# 1.) 1 out of 2 axes of a 2D array
# 2.) 2 out of 3 axes of a 3D array
#
# To keep test time down, only wavelets of length <= 8 are run.
#
# This test does not validate swtn or iswtn individually, but only
# confirms that iswtn yields an (almost) perfect reconstruction of swtn.
max_level = 3
if wavelets is None:
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet is a special case - disregard it for now
wavelets.remove('dmey')
for ndim_transform in range(1, 3):
ndim = ndim_transform + 1
for axes in combinations(range(ndim), ndim_transform):
for current_wavelet_str in wavelets:
wav = pywt.Wavelet(current_wavelet_str)
if wav.dec_len > 8:
continue # avoid excessive test duration
input_length_power = int(np.ceil(np.log2(max(
wav.dec_len,
wav.rec_len))))
N = 2**(input_length_power + max_level - 1)
X = np.arange(N**ndim).reshape((N, )*ndim)
for norm in [True, False]:
if norm and not wav.orthogonal:
# skip non-orthogonal wavelets to avoid warnings when norm=True
continue
for trim_approx in [True, False]:
coeffs = pywt.swtn(X, wav, max_level, axes=axes,
trim_approx=trim_approx, norm=norm)
coeffs_copy = deepcopy(coeffs)
Y = pywt.iswtn(coeffs, wav, axes=axes, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-5)
# verify the inverse transform didn't modify any coeffs
for c, c2 in zip(coeffs, coeffs_copy):
for k, v in c.items():
assert_array_equal(c2[k], v)
def test_swtn_iswtn_quick():
test_swtn_iswtn_integration(wavelets=['db1', ])
def test_iswtn_errors():
x = np.arange(8**3).reshape(8, 8, 8)
max_level = 2
axes = (0, 1)
w = pywt.Wavelet('db1')
coeffs = pywt.swtn(x, w, max_level, axes=axes)
# more axes than dimensions transformed
assert_raises(ValueError, pywt.iswtn, coeffs, w, axes=(0, 1, 2))
# duplicate axes not allowed
assert_raises(ValueError, pywt.iswtn, coeffs, w, axes=(0, 0))
# mismatched coefficient size
coeffs[0]['da'] = coeffs[0]['da'][:-1, :]
assert_raises(RuntimeError, pywt.iswtn, coeffs, w, axes=axes)
def test_swtn_iswtn_unique_shape_per_axis():
# test case for gh-460
_shape = (1, 48, 32) # unique shape per axis
wav = 'sym2'
max_level = 3
rstate = np.random.RandomState(0)
for shape in permutations(_shape):
# transform only along the non-singleton axes
axes = [ax for ax, s in enumerate(shape) if s != 1]
x = rstate.standard_normal(shape)
c = pywt.swtn(x, wav, max_level, axes=axes)
r = pywt.iswtn(c, wav, axes=axes)
assert_allclose(x, r, rtol=1e-10, atol=1e-10)
def test_per_axis_wavelets():
# tests a separate wavelet for each axis.
rstate = np.random.RandomState(1234)
data = rstate.randn(16, 16, 16)
level = 3
# wavelet can be a string or wavelet object
wavelets = (pywt.Wavelet('haar'), 'sym2', 'db4')
coefs = pywt.swtn(data, wavelets, level=level)
assert_allclose(pywt.iswtn(coefs, wavelets), data, atol=1e-14)
# 1-tuple also okay
coefs = pywt.swtn(data, wavelets[:1], level=level)
assert_allclose(pywt.iswtn(coefs, wavelets[:1]), data, atol=1e-14)
# length of wavelets doesn't match the length of axes
assert_raises(ValueError, pywt.swtn, data, wavelets[:2], level)
assert_raises(ValueError, pywt.iswtn, coefs, wavelets[:2])
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
# swt2/iswt2 also support per-axis wavelets/modes
data2 = data[..., 0]
coefs2 = pywt.swt2(data2, wavelets[:2], level)
assert_allclose(pywt.iswt2(coefs2, wavelets[:2]), data2, atol=1e-14)
def test_error_on_continuous_wavelet():
# A ValueError is raised if a Continuous wavelet is selected
data = np.ones((16, 16))
for dec_func, rec_func in zip([pywt.swt, pywt.swt2, pywt.swtn],
[pywt.iswt, pywt.iswt2, pywt.iswtn]):
for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
assert_raises(ValueError, dec_func, data, wavelet=cwave,
level=3)
c = dec_func(data, 'db1', level=3)
assert_raises(ValueError, rec_func, c, wavelet=cwave)
def test_iswt_mixed_dtypes():
# Mixed precision inputs give double precision output
x_real = np.arange(16).astype(np.float64)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swt(x, wav, 2)
# different precision for the approximation coefficients
coeffs[0] = [coeffs[0][0].astype(dtype1),
coeffs[0][1].astype(dtype2)]
y = pywt.iswt(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_iswt2_mixed_dtypes():
# Mixed precision inputs give double precision output
rstate = np.random.RandomState(0)
x_real = rstate.randn(8, 8)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swt2(x, wav, 2)
# different precision for the approximation coefficients
coeffs[0] = [coeffs[0][0].astype(dtype1),
tuple([c.astype(dtype2) for c in coeffs[0][1]])]
y = pywt.iswt2(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_iswtn_mixed_dtypes():
# Mixed precision inputs give double precision output
rstate = np.random.RandomState(0)
x_real = rstate.randn(8, 8, 8)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swtn(x, wav, 2)
# different precision for the approximation coefficients
a = coeffs[0].pop('a' * x.ndim)
a = a.astype(dtype1)
coeffs[0] = {k: c.astype(dtype2) for k, c in coeffs[0].items()}
coeffs[0]['a' * x.ndim] = a
y = pywt.iswtn(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_swt_zero_size_axes():
# raise on empty input array
assert_raises(ValueError, pywt.swt, [], 'db2')
# >1D case uses a different code path so check there as well
x = np.ones((1, 4))[0:0, :] # 2D with a size zero axis
assert_raises(ValueError, pywt.swtn, x, 'db2', level=1, axes=(0,))
def test_swt_variance_and_energy_preservation():
"""Verify that the 1D SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(256)
coeffs = pywt.swt(x, wav, trim_approx=True, norm=True)
variances = [np.var(c) for c in coeffs]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeffs)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swt, x, 'bior2.2', norm=True)
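# A minimal standalone sketch (not one of the test cases above) of the
# property being verified: with norm=True and an orthogonal wavelet, the
# stacked SWT coefficients preserve the signal's L2 norm. It assumes the
# same numpy/pywt imports as the rest of this test module.
def _example_swt_energy_preservation():
    sig = np.random.RandomState(0).randn(128)
    coeffs = pywt.swt(sig, 'db2', level=3, trim_approx=True, norm=True)
    return np.allclose(np.linalg.norm(sig),
                       np.linalg.norm(np.concatenate(coeffs)))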
def test_swt2_variance_and_energy_preservation():
"""Verify that the 2D SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(64, 64)
coeffs = pywt.swt2(x, wav, level=4, trim_approx=True, norm=True)
coeff_list = [coeffs[0].ravel()]
for d in coeffs[1:]:
for v in d:
coeff_list.append(v.ravel())
variances = [np.var(v) for v in coeff_list]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeff_list)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swt2, x, 'bior2.2', level=4, norm=True)
def test_swtn_variance_and_energy_preservation():
"""Verify that the nD SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(64, 64)
coeffs = pywt.swtn(x, wav, level=4, trim_approx=True, norm=True)
coeff_list = [coeffs[0].ravel()]
for d in coeffs[1:]:
for k, v in d.items():
coeff_list.append(v.ravel())
variances = [np.var(v) for v in coeff_list]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeff_list)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swtn, x, 'bior2.2', level=4, norm=True)
def test_swt_ravel_and_unravel():
# When trim_approx=True, all swt functions can use pywt.ravel_coeffs
for ndim, _swt, _iswt, ravel_type in [
(1, pywt.swt, pywt.iswt, 'swt'),
(2, pywt.swt2, pywt.iswt2, 'swt2'),
(3, pywt.swtn, pywt.iswtn, 'swtn')]:
x = np.ones((16, ) * ndim)
c = _swt(x, 'sym2', level=3, trim_approx=True)
arr, slices, shapes = pywt.ravel_coeffs(c)
c = pywt.unravel_coeffs(arr, slices, shapes, output_format=ravel_type)
r = _iswt(c, 'sym2')
assert_allclose(x, r)
|
|
#!/usr/bin/env python
# Copyright (c) 2013, 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import json
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
# get xwalk absolute path so we can run this script from any location
xwalk_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(xwalk_dir)
from app_info import AppInfo
from customize import VerifyPackageName, CustomizeAll, \
ParseParameterForCompressor, CustomizeManifest
from extension_manager import GetExtensionList, GetExtensionStatus
from handle_permissions import permission_mapping_table
from util import AllArchitectures, CleanDir, GetVersion, RunCommand, \
CreateAndCopyDir, GetBuildDir
from manifest_json_parser import HandlePermissionList
from manifest_json_parser import ManifestJsonParser
NATIVE_LIBRARY = 'libxwalkcore.so'
def ConvertArchNameToArchFolder(arch):
arch_dict = {
'x86': 'x86',
'x86_64': 'x86_64',
'arm': 'armeabi-v7a'
}
return arch_dict.get(arch, None)
def AddExeExtensions(name):
exts_str = os.environ.get('PATHEXT', '').lower()
exts = [_f for _f in exts_str.split(os.pathsep) if _f]
result = []
for e in exts:
result.append(name + e)
result.append(name)
return result
def Which(name):
"""Searches PATH for executable files with the given name, also taking
PATHEXT into account. Returns the first existing match, or None if no matches
are found."""
for path in os.environ.get('PATH', '').split(os.pathsep):
for filename in AddExeExtensions(name):
full_path = os.path.join(path, filename)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def GetAndroidApiLevel(android_path):
"""Get Highest Android target level installed.
return -1 if no targets have been found.
"""
target_output = RunCommand([android_path, 'list', 'target', '-c'])
target_regex = re.compile(r'android-(\d+)')
targets = [int(i) for i in target_regex.findall(target_output)]
targets.extend([-1])
return max(targets)
def ContainsNativeLibrary(path):
return os.path.isfile(os.path.join(path, NATIVE_LIBRARY))
def ParseManifest(options):
parser = ManifestJsonParser(os.path.expanduser(options.manifest))
if not options.name:
options.name = parser.GetAppName()
if not options.app_version:
options.app_version = parser.GetVersion()
if parser.GetDescription():
options.description = parser.GetDescription()
if parser.GetPermissions():
options.permissions = parser.GetPermissions()
if parser.GetAppUrl():
options.app_url = parser.GetAppUrl()
elif parser.GetAppLocalPath():
options.app_local_path = parser.GetAppLocalPath()
else:
print('Error: there is no app launch path defined in manifest.json.')
sys.exit(9)
options.icon_dict = {}
if parser.GetAppRoot():
options.app_root = parser.GetAppRoot()
options.icon_dict = parser.GetIcons()
if parser.GetOrientation():
options.orientation = parser.GetOrientation()
if parser.GetFullScreenFlag().lower() == 'true':
options.fullscreen = True
elif parser.GetFullScreenFlag().lower() == 'false':
options.fullscreen = False
return parser
def ParseXPK(options, out_dir):
cmd = ['python', os.path.join(xwalk_dir, 'parse_xpk.py'),
'--file=%s' % os.path.expanduser(options.xpk),
'--out=%s' % out_dir]
RunCommand(cmd)
if options.manifest:
print ('The "--manifest" option cannot be used together with "--xpk"; '
'the manifest bundled inside the XPK package is used instead.')
sys.exit(7)
if os.path.isfile(os.path.join(out_dir, 'manifest.json')):
options.manifest = os.path.join(out_dir, 'manifest.json')
else:
print('XPK doesn\'t contain manifest file.')
sys.exit(8)
def FindExtensionJars(root_path):
''' Find all .jar files for external extensions. '''
extension_jars = []
if not os.path.exists(root_path):
return extension_jars
for afile in os.listdir(root_path):
if os.path.isdir(os.path.join(root_path, afile)):
base_name = os.path.basename(afile)
extension_jar = os.path.join(root_path, afile, base_name + '.jar')
if os.path.isfile(extension_jar):
extension_jars.append(extension_jar)
return extension_jars
# Try to parse out suitable app_versionCodeBase based on app_version,
# app_version "xx.xx.xxx" will generate app_versionCodeBase "xxxxxxx"
# For example, "1.2.3" will generate app_versionCodeBase "0102003"
# "1.2" will generate "0102000"
# If app_version does not match
# r'(\d{1,2}\.+)(\d{1,2}\.+)(\d{1,3})$|(|\d{1,2}\.+)(\d{1,2})$',
# notify the user of the failure
def TryCodeBaseFromVersionName(app_version):
m = re.match(r'(\d{1,2}\.+)(\d{1,2}\.+)(\d{1,3})$|(|\d{1,2}\.+)(\d{1,2})$',
app_version)
if not m:
print('Can not parse out app_versionCodeBase from app_version, '
'please specify --app-versionCode or --app-versionCodeBase : '
'app_version=%s' % (app_version))
sys.exit(12)
versionList = []
for item in m.groups():
if (item and len(item) > 0):
versionList.append(item.strip('.'))
n = len(versionList)
while n < 3:
versionList.append('0')
n = n + 1
versionCodeBase = versionList[0].zfill(2)
versionCodeBase = versionCodeBase + versionList[1].zfill(2)
versionCodeBase = versionCodeBase + versionList[2].zfill(3)
return versionCodeBase
# Follows the recommendation from
# http://software.intel.com/en-us/blogs/2012/11/12/how-to-publish-
# your-apps-on-google-play-for-x86-based-android-devices-using
def MakeVersionCode(options, app_version):
''' Construct a version code'''
# If the user specified --app-versionCode, use it directly
if options.app_versionCode:
return options.app_versionCode
# First digit is ABI, ARM=2, x86=6
abi = '0'
if options.arch == 'arm':
abi = '2'
if options.arch == 'x86':
abi = '6'
if options.arch == 'x86_64':
abi = '7'
b = '0'
# If the user specified --app-versionCodeBase, add the ABI prefix to it to form the versionCode
if options.app_versionCodeBase:
b = str(options.app_versionCodeBase)
if len(b) > 7:
print('Version code base must be 7 digits or less: '
'versionCodeBase=%s' % (b))
sys.exit(12)
# If both --app-versionCode and --app-versionCodeBase not specified,
# try to parse out versionCodeBase based on app_version
else:
b = TryCodeBaseFromVersionName(app_version)
# zero pad to 7 digits, middle digits can be used for other
# features, according to recommendation in URL
return '%s%s' % (abi, b.zfill(7))
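# A minimal sketch (not part of the original tool) of how the pieces above
# combine; the Opts namedtuple stands in for the real optparse options object
# and is an assumption made only for illustration.
def _example_version_code():
  """Return the versionCode for --arch=x86 and app_version '1.2.3'."""
  from collections import namedtuple
  Opts = namedtuple('Opts', ['app_versionCode', 'app_versionCodeBase', 'arch'])
  opts = Opts(app_versionCode=None, app_versionCodeBase=None, arch='x86')
  # '6' (x86 ABI prefix) + '0102003' (from TryCodeBaseFromVersionName)
  return MakeVersionCode(opts, '1.2.3')  # -> '60102003'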
def GetExtensionBinaryPathList():
local_extension_list = []
extensions_path = os.path.join(os.getcwd(), "extensions")
exist_extension_list = GetExtensionList(extensions_path)
for item in exist_extension_list:
build_json_path = os.path.join(extensions_path, item, "build.json")
with open(build_json_path) as fd:
data = json.load(fd)
if not GetExtensionStatus(item, extensions_path):
continue
else:
if data.get("binary_path", False):
extension_binary_path = os.path.join(extensions_path,
item,
data["binary_path"])
else:
print("The extension \"%s\" doesn't exists." % item)
sys.exit(1)
if os.path.isdir(extension_binary_path):
local_extension_list.append(extension_binary_path)
else:
print("The extension \"%s\" doesn't exists." % item)
sys.exit(1)
return local_extension_list
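# Illustration of the on-disk layout GetExtensionBinaryPathList expects
# (the extension name and paths below are hypothetical):
#   extensions/myextension/build.json  -> {"binary_path": "bin", ...}
#   extensions/myextension/bin/        -> directory appended to the result list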
def Customize(options, app_info, manifest):
app_info.package = options.package
app_info.app_name = options.name
# 'org.xwalk.my_first_app' => 'MyFirstApp'
android_name = options.package.split('.')[-1].split('_')
app_info.android_name = ''.join([i.capitalize() for i in android_name if i])
if options.app_version:
app_info.app_version = options.app_version
if options.app_root:
app_info.app_root = os.path.expanduser(options.app_root)
if options.enable_remote_debugging:
app_info.remote_debugging = '--enable-remote-debugging'
if options.use_animatable_view:
app_info.use_animatable_view = '--use-animatable-view'
if options.fullscreen:
app_info.fullscreen_flag = '-f'
if options.orientation:
app_info.orientation = options.orientation
if options.icon:
app_info.icon = '%s' % os.path.expanduser(options.icon)
# Add local extensions to the extension list.
extension_binary_path_list = GetExtensionBinaryPathList()
if len(extension_binary_path_list) > 0:
if options.extensions is None:
options.extensions = ""
else:
options.extensions += os.pathsep
for item in extension_binary_path_list:
options.extensions += item
options.extensions += os.pathsep
# Trim the final path separator.
options.extensions = options.extensions[0:-1]
CustomizeAll(app_info, options.description, options.icon_dict,
options.permissions, options.app_url, options.app_local_path,
options.keep_screen_on, options.extensions, manifest,
options.xwalk_command_line, options.compressor)
def Execution(options, app_info):
# Now that we have the correct app_version and ABI value,
# generate a suitable versionCode.
app_info.app_versionCode = MakeVersionCode(options, app_info.app_version)
# Write generated versionCode into AndroidManifest.xml.
# Later if we have other customization,
# we can put them together into CustomizeManifest func.
CustomizeManifest(app_info)
name = app_info.android_name
arch_string = (' ('+options.arch+')' if options.arch else '')
print('\nStarting application build' + arch_string)
app_dir = GetBuildDir(name)
android_path = Which('android')
api_level = GetAndroidApiLevel(android_path)
target_string = 'android-%d' % api_level
print (' * Checking keystore for signing')
if options.keystore_path:
key_store = os.path.expanduser(options.keystore_path)
if options.keystore_alias:
key_alias = options.keystore_alias
else:
print('Please provide an alias name of the developer key.')
sys.exit(6)
if options.keystore_passcode:
key_code = options.keystore_passcode
else:
key_code = None
if options.keystore_alias_passcode:
key_alias_code = options.keystore_alias_passcode
else:
key_alias_code = None
else:
print(' No keystore provided for signing. Using xwalk\'s keystore '
'for debugging.\n Please use a valid keystore when '
'distributing to the app market.')
key_store = os.path.join(xwalk_dir, 'xwalk-debug.keystore')
key_alias = 'xwalkdebugkey'
key_code = 'xwalkdebug'
key_alias_code = 'xwalkdebug'
# Update android project for app and xwalk_core_library.
update_project_cmd = [android_path, 'update', 'project',
'--path', app_dir,
'--target', target_string,
'--name', name]
if options.mode == 'embedded':
print(' * Updating project with xwalk_core_library')
RunCommand([android_path, 'update', 'lib-project',
'--path', os.path.join(app_dir, 'xwalk_core_library'),
'--target', target_string])
update_project_cmd.extend(['-l', 'xwalk_core_library'])
else:
print(' * Updating project')
RunCommand(update_project_cmd)
# Check whether external extensions are included.
print(' * Checking for external extensions')
extensions_string = 'xwalk-extensions'
extensions_dir = os.path.join(app_dir, extensions_string)
external_extension_jars = FindExtensionJars(extensions_dir)
for external_extension_jar in external_extension_jars:
shutil.copyfile(external_extension_jar,
os.path.join(app_dir, 'libs',
os.path.basename(external_extension_jar)))
if options.mode == 'embedded':
print (' * Copying native libraries for %s' % options.arch)
# Remove existing native libraries in xwalk_core_library; they are probably
# left over from a previous run that packaged an APK for another CPU arch.
# And then copy the native libraries for the specified arch into
# xwalk_core_library.
arch = ConvertArchNameToArchFolder(options.arch)
if not arch:
print ('Invalid CPU arch: %s.' % options.arch)
sys.exit(10)
library_lib_path = os.path.join(app_dir, 'xwalk_core_library', 'libs')
for dir_name in os.listdir(library_lib_path):
lib_dir = os.path.join(library_lib_path, dir_name)
if ContainsNativeLibrary(lib_dir):
shutil.rmtree(lib_dir)
native_lib_path = os.path.join(app_dir, 'native_libs', arch)
if ContainsNativeLibrary(native_lib_path):
shutil.copytree(native_lib_path, os.path.join(library_lib_path, arch))
else:
print('No %s native library has been found for creating a Crosswalk '
'embedded APK.' % arch)
sys.exit(10)
if options.project_only:
print (' (Skipping apk package creation)')
return
# Build the APK
if options.mode == 'embedded':
print(' * Building Android apk package with Crosswalk embedded' +
arch_string)
else:
print(' * Building Android apk package')
ant_path = Which('ant')
ant_cmd = [ant_path, 'release', '-f', os.path.join(app_dir, 'build.xml')]
ant_cmd.extend(['-Dkey.store=%s' % os.path.abspath(key_store)])
ant_cmd.extend(['-Dkey.alias=%s' % key_alias])
if key_code:
ant_cmd.extend(['-Dkey.store.password=%s' % key_code])
if key_alias_code:
ant_cmd.extend(['-Dkey.alias.password=%s' % key_alias_code])
cmd_display = ' '.join([str(item) for item in ant_cmd])
if options.verbose:
print('Executing:\n %s\n' % cmd_display)
else:
ant_cmd.extend(['-quiet'])
ant_result = subprocess.call(ant_cmd)
if ant_result != 0:
print('Command "%s" exited with non-zero exit code %d'
% (cmd_display, ant_result))
sys.exit(ant_result)
src_file = os.path.join(app_dir, 'bin', '%s-release.apk' % name)
package_name = name
if options.app_version:
package_name += ('_' + options.app_version)
if options.mode == 'shared':
dst_file = os.path.join(options.target_dir, '%s.apk' % package_name)
elif options.mode == 'embedded':
dst_file = os.path.join(options.target_dir,
'%s_%s.apk' % (package_name, options.arch))
shutil.copyfile(src_file, dst_file)
print(' (Location: %s)' % dst_file)
def PrintPackageInfo(options, name, packaged_archs):
package_name_version = os.path.join(options.target_dir, name)
if options.app_version:
package_name_version += '_' + options.app_version
if len(packaged_archs) == 0:
print ('\nA non-platform specific APK for the web application "%s" was '
'generated successfully at:\n %s.apk.\nIt requires a shared '
'Crosswalk Runtime to be present.'
% (name, package_name_version))
return
all_archs = set(AllArchitectures())
if len(packaged_archs) != len(all_archs):
missed_archs = all_archs - set(packaged_archs)
print ('\nNote: This APK will only work on %s-based Android devices.'
' Consider building\nfor %s as well.' %
(', '.join(packaged_archs), ', '.join(missed_archs)))
else:
print ("\nApplication apk's were created for %d architectures (%s)." %
(len(all_archs), (','.join(all_archs))))
print ('If you submit this application to an application '
'store, please submit both\npackages. Instructions '
'for submitting multiple APKs to Google Play Store are\navailable '
'here:')
print (' https://software.intel.com/en-us/html5/articles/submitting'
'-multiple-crosswalk-apk-to-google-play-store')
def CheckSystemRequirements():
''' Check for android, ant, template dir '''
sys.stdout.write('Checking system requirements...')
sys.stdout.flush()
# check android install
android_path = Which('android')
if android_path is None:
print('failed\nThe "android" binary could not be found. Check your Android '
'SDK installation and your PATH environment variable.')
sys.exit(1)
if GetAndroidApiLevel(android_path) < 14:
print('failed\nPlease install Android API level (>=14) first.')
sys.exit(3)
# Check ant install
ant_path = Which('ant')
if ant_path is None:
print('failed\nAnt could not be found. Please make sure it is installed.')
sys.exit(4)
print('ok')
def MakeApk(options, app_info, manifest):
CheckSystemRequirements()
Customize(options, app_info, manifest)
name = app_info.android_name
app_dir = GetBuildDir(name)
packaged_archs = []
if options.mode == 'shared':
# For shared mode, it's not necessary to use the whole xwalk core library;
# using xwalk_core_library_java_app_part.jar from it is enough.
java_app_part_jar = os.path.join(xwalk_dir, 'xwalk_core_library', 'libs',
'xwalk_core_library_java_app_part.jar')
shutil.copy(java_app_part_jar, os.path.join(app_dir, 'libs'))
Execution(options, app_info)
elif options.mode == 'embedded':
# Copy xwalk_core_library into app folder and move the native libraries
# out.
# When making apk for specified CPU arch, will only include the
# corresponding native library by copying it back into xwalk_core_library.
target_library_path = os.path.join(app_dir, 'xwalk_core_library')
shutil.copytree(os.path.join(xwalk_dir, 'xwalk_core_library'),
target_library_path)
library_lib_path = os.path.join(target_library_path, 'libs')
native_lib_path = os.path.join(app_dir, 'native_libs')
os.makedirs(native_lib_path)
available_archs = []
for dir_name in os.listdir(library_lib_path):
lib_dir = os.path.join(library_lib_path, dir_name)
if ContainsNativeLibrary(lib_dir):
shutil.move(lib_dir, os.path.join(native_lib_path, dir_name))
available_archs.append(dir_name)
if options.arch:
Execution(options, app_info)
packaged_archs.append(options.arch)
else:
# If the arch option is unspecified, all of available platform APKs
# will be generated.
valid_archs = ['x86', 'x86_64', 'armeabi-v7a']
for arch in valid_archs:
if arch in available_archs:
if arch.find('arm') != -1:
options.arch = 'arm'
else:
options.arch = arch
print("options.arch:", options.arch)
Execution(options, app_info)
packaged_archs.append(options.arch)
else:
print('Warning: failed to create package for arch "%s" '
'due to missing native library' % arch)
if len(packaged_archs) == 0:
print('No packages created, aborting')
sys.exit(13)
# if project_dir, save build directory
if options.project_dir:
print ('\nCreating project directory')
save_dir = os.path.join(options.project_dir, name)
if CreateAndCopyDir(app_dir, save_dir, True):
print (' A project directory was created successfully in:\n %s' %
os.path.abspath(save_dir))
print (' To manually generate an APK, run the following in that '
'directory:')
print (' ant release -f build.xml')
print (' For more information, see:\n'
' http://developer.android.com/tools/building/'
'building-cmdline.html')
else:
print ('Error: Unable to create a project directory during the build. '
'Please check the directory passed in --project-dir, '
'available disk space, and write permission.')
if not options.project_only:
PrintPackageInfo(options, name, packaged_archs)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-v', '--version', action='store_true',
dest='version', default=False,
help='The version of this python tool.')
parser.add_option('--verbose', action="store_true",
dest='verbose', default=False,
help='Print debug messages.')
info = ('The packaging mode of the web application. The value \'shared\' '
'means that the runtime is shared across multiple application '
'instances and that the runtime needs to be distributed separately. '
'The value \'embedded\' means that the runtime is embedded into the '
'application itself and distributed along with it. '
'The default mode is \'embedded\'. For example: --mode=embedded')
parser.add_option('--mode', choices=('embedded', 'shared'),
default='embedded', help=info)
info = ('The target architecture of the embedded runtime. Supported values '
'are \'x86\', \'x86_64\' and \'arm\'. Note, if undefined, APKs for '
'all possible architectures will be generated.')
parser.add_option('--arch', choices=AllArchitectures(), help=info)
group = optparse.OptionGroup(parser, 'Application Source Options',
'This packaging tool supports 3 kinds of web application source: '
'1) XPK package; 2) manifest.json; 3) various command line options, '
'for example, \'--app-url\' for website, \'--app-root\' and '
'\'--app-local-path\' for local web application.')
info = ('The path of the XPK package. For example, --xpk=/path/to/xpk/file')
group.add_option('--xpk', help=info)
info = ('The manifest file with the detail description of the application. '
'For example, --manifest=/path/to/your/manifest/file')
group.add_option('--manifest', help=info)
info = ('The URL of the application. '
'This flag allows packaging a website as an APK. For example, '
'--app-url=http://www.intel.com')
group.add_option('--app-url', help=info)
info = ('The root path of the web app. '
'This flag allows packaging a local web app as an APK. For example, '
'--app-root=/root/path/of/the/web/app')
group.add_option('--app-root', help=info)
info = ('The path of the entry file, relative to the value given by '
'\'--app-root\'. This flag should be used together with \'--app-root\'. '
'For example, --app-local-path=/relative/path/of/entry/file')
group.add_option('--app-local-path', help=info)
parser.add_option_group(group)
# Mandatory options group
group = optparse.OptionGroup(parser, 'Mandatory arguments',
'They are used for describing the APK information through '
'command line options.')
info = ('The apk name. For example, --name="Your Application Name"')
group.add_option('--name', help=info)
info = ('The package name. For example, '
'--package=com.example.YourPackage')
group.add_option('--package', help=info)
parser.add_option_group(group)
# Optional options group (alphabetical)
group = optparse.OptionGroup(parser, 'Optional arguments',
'They are used for various settings for applications through '
'command line options.')
info = ('The version name of the application. '
'For example, --app-version=1.0.0')
group.add_option('--app-version', help=info)
info = ('The version code of the application. '
'For example, --app-versionCode=24')
group.add_option('--app-versionCode', type='int', help=info)
info = ('The version code base of the application. Version code will '
'be made by adding a prefix based on architecture to the version '
'code base. For example, --app-versionCodeBase=24')
group.add_option('--app-versionCodeBase', type='int', help=info)
info = ('The description of the application. For example, '
'--description=YourApplicationDescription')
group.add_option('--description', help=info)
group.add_option('--enable-remote-debugging', action='store_true',
dest='enable_remote_debugging', default=False,
help='Enable remote debugging.')
group.add_option('--use-animatable-view', action='store_true',
dest='use_animatable_view', default=False,
help='Enable using animatable view (TextureView).')
info = ('The list of external extension paths split by the OS path separator. '
'The separators are \':\', \';\' and \':\' on Linux, Windows and '
'Mac OS respectively. For example, '
'--extensions=/path/to/extension1:/path/to/extension2.')
group.add_option('--extensions', help=info)
group.add_option('-f', '--fullscreen', action='store_true',
dest='fullscreen', default=False,
help='Make application fullscreen.')
group.add_option('--keep-screen-on', action='store_true', default=False,
help='Support keeping screen on')
info = ('The path of the application icon. '
'For example, --icon=/path/to/your/customized/icon')
group.add_option('--icon', help=info)
info = ('The orientation of the web app\'s display on the device. '
'For example, --orientation=landscape. The default value is '
'\'unspecified\'. The permitted values are from Android: '
'http://developer.android.com/guide/topics/manifest/'
'activity-element.html#screen')
group.add_option('--orientation', help=info)
info = ('The list of permissions to be used by the web application. For example, '
'--permissions=geolocation:webgl')
group.add_option('--permissions', help=info)
info = ('Create an Android project directory with Crosswalk at this location.'
' (See project-only option below)')
group.add_option('--project-dir', help=info)
info = ('Must be used with project-dir option. Create an Android project '
'directory with Crosswalk but do not build the APK package')
group.add_option('--project-only', action='store_true', default=False,
dest='project_only', help=info)
info = ('Packaging tool will move the output APKs to the target directory')
group.add_option('--target-dir', default=os.getcwd(), help=info)
info = ('Pass extra command-line switches to the runtime. '
'Crosswalk is powered by Chromium and supports Chromium command-line switches. '
'For example, '
'--xwalk-command-line=\'--chromium-command-1 --xwalk-command-2\'')
group.add_option('--xwalk-command-line', default='', help=info)
parser.add_option_group(group)
# Keystore options group
group = optparse.OptionGroup(parser, 'Keystore Options',
'The keystore holds the developer\'s signing key; it is used when the '
'developer wants to distribute the application.')
info = ('The path to the developer keystore. For example, '
'--keystore-path=/path/to/your/developer/keystore')
group.add_option('--keystore-path', help=info)
info = ('The alias name of keystore. For example, --keystore-alias=name')
group.add_option('--keystore-alias', help=info)
info = ('The passcode of keystore. For example, --keystore-passcode=code')
group.add_option('--keystore-passcode', help=info)
info = ('The passcode for the alias\'s private key in the keystore. '
'For example, --keystore-alias-passcode=alias-code')
group.add_option('--keystore-alias-passcode', help=info)
info = ('Minify and obfuscate JavaScript and CSS. '
'--compressor: compress both JavaScript and CSS. '
'--compressor=js: compress JavaScript only. '
'--compressor=css: compress CSS only.')
group.add_option('--compressor', dest='compressor', action='callback',
callback=ParseParameterForCompressor, type='string',
nargs=0, help=info)
parser.add_option_group(group)
options, _ = parser.parse_args()
if len(argv) == 1:
parser.print_help()
return 0
if options.version:
if os.path.isfile('VERSION'):
print(GetVersion('VERSION'))
return 0
else:
parser.error('VERSION was not found, so Crosswalk\'s version could not '
'be determined.')
xpk_temp_dir = ''
if options.xpk:
xpk_name = os.path.splitext(os.path.basename(options.xpk))[0]
xpk_temp_dir = tempfile.mkdtemp(prefix="%s-" % xpk_name + '_xpk')
CleanDir(xpk_temp_dir)
ParseXPK(options, xpk_temp_dir)
if options.manifest:
options.manifest = os.path.abspath(options.manifest)
if not os.path.isfile(options.manifest):
print('Error: The manifest file does not exist.')
sys.exit(8)
if options.app_root and not options.manifest:
manifest_path = os.path.join(options.app_root, 'manifest.json')
if os.path.exists(manifest_path):
print('Using manifest.json distributed with the application.')
options.manifest = manifest_path
app_info = AppInfo()
manifest = None
if not options.manifest:
# The checks here are really convoluted, but at the moment make_apk
# misbehaves if any of the following conditions is true.
if options.app_url:
# 1) --app-url must be passed without either --app-local-path or
# --app-root.
if options.app_root or options.app_local_path:
parser.error('You must pass either "--app-url" or "--app-local-path" '
'together with "--app-root", but not both.')
else:
# 2) --app-url is not passed but only one of --app-local-path and
# --app-root is set.
if bool(options.app_root) != bool(options.app_local_path):
parser.error('You must specify both "--app-local-path" and '
'"--app-root".')
# 3) None of --app-url, --app-local-path and --app-root are passed.
elif not options.app_root and not options.app_local_path:
parser.error('You must pass either "--app-url" or "--app-local-path" '
'with "--app-root".')
if options.permissions:
permission_list = options.permissions.split(':')
else:
print('Warning: all supported permissions on Android port are added. '
'Refer to https://github.com/crosswalk-project/'
'crosswalk-website/wiki/Crosswalk-manifest')
permission_list = permission_mapping_table.keys()
options.permissions = HandlePermissionList(permission_list)
options.icon_dict = {}
else:
try:
manifest = ParseManifest(options)
except SystemExit as ec:
return ec.code
if not options.name:
parser.error('An APK name is required. Please use the "--name" option.')
if not options.package:
parser.error('A package name is required. Please use the "--package" '
'option.')
VerifyPackageName(options.package)
if (options.app_root and options.app_local_path and
not os.path.isfile(os.path.join(options.app_root,
options.app_local_path))):
print('Please make sure that the launch file given by "--app-local-path" '
'exists under "--app-root".')
sys.exit(7)
if options.target_dir:
target_dir = os.path.abspath(os.path.expanduser(options.target_dir))
options.target_dir = target_dir
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
if options.project_only and not options.project_dir:
print('\nmake_apk.py error: Option --project-only must be used '
'with --project-dir')
sys.exit(8)
try:
MakeApk(options, app_info, manifest)
except SystemExit as ec:
return ec.code
finally:
CleanDir(GetBuildDir(app_info.android_name))
CleanDir(xpk_temp_dir)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
print('')
|
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
""" arte_plus_7 is a script to help download arte videos
It's only configured for French at the moment.
Usage:
The following commands will return the videos urls found
# The generic program page
./arte_plus_7.py -u http://www.arte.tv/guide/fr/emissions/TRA/tracks
# The page dedicated to the video
./arte_plus_7.py -u http://www.arte.tv/guide/fr/034049-007/karambolage
# Direct access to some pre-stored programs
./arte_plus_7.py -p tracks
To actually download the video, add '--quality <QUAL>' with the quality you
want from the list
# Direct access to some pre-stored programs
./arte_plus_7.py -p tracks --quality <MQ|HQ|EQ|SQ>
"""
from __future__ import print_function
import re
import os.path
import json
import subprocess
import argparse
import logging
from datetime import datetime
# pylint:disable=locally-disabled,import-error,no-name-in-module
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
import bs4
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler())
__version__ = '2.2.4'
def page_read(url):
"""Download the page and return the utf-8 decoded result."""
LOGGER.debug('Reading %s', url)
return urlopen(url).read().decode('utf-8')
def page_soup(page):
"""Get the page soup."""
return bs4.BeautifulSoup(page, 'html.parser')
class Plus7Program(object):
"""Describes an ArtePlus7 video.
:param video_id: video unique id
"""
JSON_URL = ('http://arte.tv/papi/tvguide/videos/stream/player/D/'
'{0}_PLUS7-D/ALL/ALL.json')
def __init__(self, video_id):
json_url = self.JSON_URL.format(self._short_id(video_id))
debug_id = '%s:%s' % (video_id, json_url)
try:
page = page_read(json_url)
except HTTPError:
raise ValueError('No JSON for id: %s' % debug_id)
_json = json.loads(page)
player = _json['videoJsonPlayer']
if player.get('custom_msg', {}).get('type', None) == 'error':
raise ValueError("Error: '%s': %s" % (player['custom_msg']['msg'],
debug_id))
# Read infos
try:
self.timestamp = player['videoBroadcastTimestamp'] / 1000.0
self.date = self._date_from_timestamp(self.timestamp)
self.name = player['VST']['VNA']
self.full_name = '{self.name}_{self.date}'.format(self=self)
self.urls = self._extract_videos(player['VSR'])
except KeyError as err:
raise ValueError('Incomplete JSON for id: %s: %s' %
(err, debug_id))
@staticmethod
def _date_from_timestamp(timestamp, fmt='%Y-%m-%d'):
"""Format timestamp to date string."""
return datetime.fromtimestamp(timestamp).strftime(fmt)
def infos(self, values=('date', 'name', 'full_name', 'urls')):
"""Return a dict describing the object."""
values = set(values)
ret = {p: v for p, v in self.__dict__.items() if p in values}
return ret
def download(self, quality, directory=None):
"""Download the video."""
url = self.urls[quality]
directory = directory or '.'
dl_name = '{name}_{quality}.mp4'
dl_name = dl_name.format(name=self.full_name, quality=quality)
dl_name = os.path.join(directory, dl_name)
cmd = ['wget', '--continue', '-O', dl_name, url]
LOGGER.info(' '.join(cmd))
subprocess.call(cmd)
@staticmethod
def _extract_videos(vsr, media='mp4', lang='FR'):
videos = {}
for video in vsr.values():
if video['mediaType'] != media:
continue
if video['versionShortLibelle'] != lang:
continue
videos[video['VQU']] = video['url']
return videos
@staticmethod
def _short_id(video_id):
"""Return short id used for jon entry.
>>> Plus7Program._short_id('058941-007-A')
'058941-007'
"""
return '-'.join(video_id.split('-')[0:2])
@classmethod
def by_url(cls, url):
"""Return Plus7Program for given `url`."""
video_id = cls._id_from_url(url)
return Plus7Program(video_id)
@staticmethod
def _id_from_url(url):
"""Extract video id from url.
>>> Plus7Program._id_from_url(
... 'http://www.arte.tv/guide/de/055969-002-A/tracks?autoplay=1')
'055969-002-A'
>>> Plus7Program._id_from_url(
... 'http://www.arte.tv/guide/fr/055900-002-A/trop-xx?autoplay=1')
'055900-002-A'
>>> Plus7Program._id_from_url(
... 'http://www.arte.tv/guide/fr/058941-008/tracks')
'058941-008'
"""
url = re.sub(r'\?.*', '', url)
video_id = url.split('/')[-2]
return video_id
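# A minimal programmatic usage sketch (the CLI in main() below is the real
# entry point); the URL is taken from the doctest above and the 'HQ' quality
# value is an assumption.
def _example_download_by_url():
    program = Plus7Program.by_url(
        'http://www.arte.tv/guide/fr/058941-008/tracks')
    print(program.infos())       # date, name, full_name and available urls
    program.download('HQ', '.')  # wget the HQ mp4 into the current directory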
class ArtePlus7(object):
"""ArtePlus7 helps using arte website."""
PROGRAMS_JSON_URL = 'http://www.arte.tv/guide/fr/plus7.json'
PROGRAMS = {
'tracks': 'Tracks',
'karambolage': 'Karambolage',
'xenius': 'X:enius',
}
PROGRAMS_SEARCH = 'http://www.arte.tv/fr/search/?q={0}'
@classmethod
def search(cls, search_str):
"""Search program with given `search_str`.
It will be passed directly as a search query string
"""
LOGGER.info('Searching %s', search_str)
url = cls.PROGRAMS_SEARCH.format(search_str)
page = page_read(url)
program_dict = cls._programs_dict_from_page(page)
programs = []
for program in program_dict['programs']:
try:
prog = Plus7Program(program['id'])
except ValueError as err:
# Ignore 'previews' or 'outdated'
LOGGER.debug('Error while reading program: %r', err)
else:
programs.append(prog)
programs.sort(key=lambda p: p.timestamp, reverse=True)
return programs
@classmethod
def program(cls, program):
"""Search program and select only results that are named 'program'."""
search_str = ArtePlus7.PROGRAMS[program]
all_programs = cls.search(search_str)
programs = [p for p in all_programs if p.name == program]
return programs
@staticmethod
def _programs_dict_from_page(page):
"""Return programs dict from page.
The programs dict is stored as JSON in the 'data-results' attribute
of the div with id='search-container'.
<div
id="search-container"
data-results="{...PROGRAMS_DICT_JSON...}"
"""
soup = page_soup(page)
tag = soup.find(id='search-container')
programs = json.loads(tag.attrs['data-results'])
return programs
def parser():
""" arte_plus_7 parser """
_parser = argparse.ArgumentParser(
description=u'ArtePlus7 videos download')
_parser.add_argument('-v', '--verbose', action='store_true', default=False)
_parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
vid_parser = _parser.add_mutually_exclusive_group(required=True)
vid_parser.add_argument('-u', '--url',
help=u'Arte page to download video from')
vid_parser.add_argument('-p', '--program',
choices=ArtePlus7.PROGRAMS.keys(),
help=u'Download given program')
vid_parser.add_argument('-s', '--search', help=u'Search programs')
_parser.add_argument(
'-n', '--num-programs', type=int, default=1,
help=u'Specify number of programs to download (-1 for all).')
_parser.add_argument('-q', '--quality',
choices=(u'MQ', u'HQ', u'EQ', u'SQ'),
help=u'Video quality to download')
_parser.add_argument('-d', '--download-directory', default='.',
help=u'Directory where to save file')
return _parser
def main():
""" arte_plus_7 main function """
opts = parser().parse_args()
if opts.verbose:
LOGGER.setLevel(logging.DEBUG)
# Get programs
if opts.url:
programs = [Plus7Program.by_url(opts.url)]
elif opts.program:
programs = ArtePlus7.program(opts.program)
elif opts.search:
programs = ArtePlus7.search(opts.search)
else:
raise ValueError('Invalid option, should be url, program or search')
# Nothing found
if not programs:
LOGGER.error('Error: No videos found')
exit(1)
num_progs = len(programs) if opts.num_programs == -1 else opts.num_programs
LOGGER.info('Found %d videos, using %d', len(programs), num_progs)
programs = programs[0:num_progs]
# Iterate over programs selection
for program in programs:
if opts.quality is not None:
program.download(opts.quality, opts.download_directory)
else:
print(json.dumps(program.infos(), indent=4, sort_keys=True))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.services.types import asset_set_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AssetSetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AssetSetServiceGrpcTransport
class AssetSetServiceClientMeta(type):
"""Metaclass for the AssetSetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AssetSetServiceTransport]]
_transport_registry["grpc"] = AssetSetServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AssetSetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AssetSetServiceClient(metaclass=AssetSetServiceClientMeta):
"""Service to manage asset set"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AssetSetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AssetSetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AssetSetServiceTransport:
"""Return the transport used by the client instance.
Returns:
AssetSetServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def asset_set_path(customer_id: str, asset_set_id: str,) -> str:
"""Return a fully-qualified asset_set string."""
return "customers/{customer_id}/assetSets/{asset_set_id}".format(
customer_id=customer_id, asset_set_id=asset_set_id,
)
@staticmethod
def parse_asset_set_path(path: str) -> Dict[str, str]:
"""Parse a asset_set path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/assetSets/(?P<asset_set_id>.+?)$",
path,
)
return m.groupdict() if m else {}
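    # Illustration of the path helpers above (IDs are hypothetical):
    #   AssetSetServiceClient.asset_set_path("1234567890", "42")
    #       -> "customers/1234567890/assetSets/42"
    #   AssetSetServiceClient.parse_asset_set_path(
    #       "customers/1234567890/assetSets/42")
    #       -> {"customer_id": "1234567890", "asset_set_id": "42"}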
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AssetSetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the asset set service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AssetSetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AssetSetServiceTransport):
# transport is an AssetSetServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AssetSetServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def mutate_asset_sets(
self,
request: Union[asset_set_service.MutateAssetSetsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[asset_set_service.AssetSetOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> asset_set_service.MutateAssetSetsResponse:
r"""Creates, updates or removes asset sets. Operation
statuses are returned.
Args:
request (Union[google.ads.googleads.v9.services.types.MutateAssetSetsRequest, dict]):
The request object. Request message for
[AssetSetService.MutateAssetSets][google.ads.googleads.v9.services.AssetSetService.MutateAssetSets].
customer_id (:class:`str`):
Required. The ID of the customer
whose asset sets are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.AssetSetOperation]`):
Required. The list of operations to
perform on individual asset sets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateAssetSetsResponse:
Response message for an asset set
mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an asset_set_service.MutateAssetSetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, asset_set_service.MutateAssetSetsRequest):
request = asset_set_service.MutateAssetSetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_asset_sets
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AssetSetServiceClient",)
|
|
import datetime
from django.test import TestCase
from django import template
import ttag
from ttag.tests.setup import tags, models
template.add_to_builtins(tags.__name__)
def render(contents, extra_context=None):
return template.Template(contents).render(template.Context(extra_context))
class TagExecutionTests(TestCase):
def test_default(self):
"""
A tag with named arguments works with or without the argument as long
as a default value is set.
"""
self.assertEqual(render('{% named_arg %}'),
'The limit is %d' %
tags.NamedArg._meta.args['limit'].default)
# Reset the limit tag to have no default (and be required, which is set
# to False if a default was given).
tags.NamedArg._meta.args['limit'].default = None
tags.NamedArg._meta.args['limit'].required = True
self.assertRaises(template.TemplateSyntaxError, render,
'{% named_arg %}')
def test_named(self):
"""
Standard named argument syntax is ``{% tag arg value %}``
"""
self.assertEqual(render('{% named_arg limit 200 %}'),
'The limit is 200')
self.assertRaises(template.TemplateSyntaxError, template.Template,
'{% named_arg limit=25 %}')
def test_named_keyword(self):
self.assertEqual(render('{% named_keyword_arg limit=100 %}'),
'The limit is 100')
self.assertRaises(template.TemplateSyntaxError, template.Template,
"{% named_keyword_arg limit 15 %}")
def test_handle_args(self):
"""tags with no arguments take no arguments"""
self.assertRaises(template.TemplateSyntaxError, template.Template,
'{% no_argument this fails %}')
def test_constant_tag(self):
"""tags with no arguments take no arguments"""
self.assertEqual(render('{% constant 1 to 2 %}'), '1 - 2')
self.assertRaises(template.TemplateSyntaxError, template.Template,
'{% constant 1 t 2 %}', {'t': 'to'})
def test_block_tag(self):
"""tags with a block meta option"""
self.assertEqual(render('{% repeat 5 %}a{% done %}'), 'aaaaa')
def test_multiple_block_tag(self):
"""tags with a block meta option"""
self.assertEqual(render('{% repeat_with_empty 5 %}a{% empty %}b{% stop %}'), 'aaaaa')
self.assertEqual(render('{% repeat_with_empty 0 %}a{% empty %}b{% stop %}'), 'b')
def build_invalid_positional_optional():
class Tag(ttag.Tag):
start = ttag.Arg(required=False)
end = ttag.Arg()
class PositionalTest(TestCase):
def test_positional(self):
"""
Test that positional arguments work.
"""
self.assertEqual(render('{% positional 10 %}'), u"10")
self.assertRaises(template.TemplateSyntaxError, render,
'{% positional %}')
self.assertRaises(template.TemplateSyntaxError, render,
'{% positional limit 10 %}')
def test_positional_mixed(self):
"""
Test that positional arguments work, mixed with named arguments.
"""
self.assertEqual(render('{% positional_mixed 1 as a%}x{{ a }}'), 'x1')
self.assertEqual(render('{% positional_mixed var as a%}x{{ a }}',
{'var': '2'}), 'x2')
def test_positional_mixedkw(self):
"""
Test that positional arguments work, mixed with keyword arguments.
"""
self.assertEqual(render('{% positional_mixedkw 1 default=9 %}'), '1')
self.assertEqual(render('{% positional_mixedkw var default=9 %}',
{'var': None}), '9')
self.assertEqual(render('{% positional_mixedkw default=9 %}'), '9')
def test_positional_optional(self):
"""
Test that optional positional arguments work.
"""
self.assertEqual(render('{% positional_optional 2 %}'), '0,1')
self.assertEqual(render('{% positional_optional_mixed 10 step 2 %}'),
'0,2,4,6,8')
def test_optional_last(self):
"""
Test that an error is raised if optional positional arguments are
followed by required ones.
"""
self.assertRaises(template.TemplateSyntaxError,
build_invalid_positional_optional)
class TestArgumentTypes(TestCase):
def test_model_instance_arg(self):
content = '{% argument_type url object %}'
object = models.Link(url='http://bing.com')
self.assertEqual(render(content, {'object': object}),
unicode(object))
self.assertRaises(ttag.TagValidationError, render, content,
{'object': int()})
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render, content)
def test_integer_arg(self):
self.assertEqual(render('{% argument_type age 101 %}'), '101')
self.assertEqual(render('{% argument_type age a %}', {'a': 99}), '99')
# IntegerArg.clean calls int(value), so string integers should be
# converted.
self.assertEqual(render('{% argument_type age "23" %}'), '23')
self.assertEqual(render('{% argument_type age a %}', {'a': '7'}), '7')
# Fail if value or variable can't be resolved as an integer.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type age "7b" %}')
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type age age %}', {'age': 'NaN'})
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type age age %}')
def test_string_arg(self):
# Ensure both single quotes and double quotes work.
self.assertEqual(render('{% argument_type name "alice" %}'), 'alice')
self.assertEqual(render("{% argument_type name 'bob' %}"), 'bob')
# Ensure a context variable works.
self.assertEqual(render("{% argument_type name dave %}",
{'dave': 'Dave'}),
'Dave')
# Values are cast to unicode.
class Name(object):
def __init__(self, name):
self.name = name
def __unicode__(self):
return self.name
self.assertEqual(render('{% argument_type name dave %}',
{'dave': Name('dave')}), 'dave')
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type name dave %}')
def test_datetime_arg(self):
self.assertEqual(render('{% argument_type datetime dt %}',
{'dt': datetime.datetime(2010, 1, 9,
22, 33, 47)}),
'2010-01-09 22:33:47')
# Fail if variable isn't a datetime.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type datetime dt %}', {'dt': 'NaN'})
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type datetime dt %}')
def test_date_arg(self):
self.assertEqual(render('{% argument_type date d %}',
{'d': datetime.date(2010, 1, 9)}),
'2010-01-09')
# Fail if variable isn't a datetime.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type date d %}', {'d': 'NaN'})
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type date d %}')
def test_time_arg(self):
self.assertEqual(render('{% argument_type time t %}',
{'t': datetime.time(22, 33, 47)}),
'22:33:47')
# Fail if variable isn't a datetime.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type time t %}', {'t': 'NaN'})
# Fail if the variable isn't in the context.
self.assertRaises(ttag.TagValidationError, render,
'{% argument_type time t %}')
def test_flag_arg(self):
self.assertEqual(render('{% argument_type %}'), '')
self.assertEqual(render('{% argument_type flag %}'), 'flag_is_set')
def test_multi_arg(self):
self.assertEqual(render('{% dot_combine 1 2 3 4 %}'), '1.2.3.4')
self.assertEqual(
render('{% dot_combine_default "a" b "c" default "X" %}',
{'b': None}), 'a.X.c')
class KeywordsArgTest(TestCase):
compact_kwargs = ' "file.html" with foo=x bar=2 %}'
verbose_kwargs = ' "file.html" with x as foo and 2 as bar %}'
mixed_kwargs = ' "file.html" with bar=2 x as foo and baz=3 %}'
def test_not_required(self):
self.assertEqual(render('{% include_compact "file.html" %}'),
'including file.html')
self.assertEqual(render('{% include_verbose "file.html" %}'),
'including file.html')
self.assertEqual(render('{% include_mixed "file.html" %}'),
'including file.html')
def test_compact(self):
self.assertEqual(
render('{% include_compact' + self.compact_kwargs, {'x': 1}),
'including file.html with bar = 2 and foo = 1'
)
def test_compact_invalid(self):
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_compact "file.html" with foo=1 and bar=2 %}'
)
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_compact' + self.verbose_kwargs
)
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_compact' + self.mixed_kwargs
)
def test_verbose(self):
self.assertEqual(
render('{% include_verbose' + self.verbose_kwargs, {'x': 1}),
'including file.html with bar = 2 and foo = 1'
)
def test_verbose_invalid(self):
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_verbose' + self.compact_kwargs
)
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_verbose' + self.mixed_kwargs
)
def test_mixed(self):
self.assertEqual(
render('{% include_mixed' + self.mixed_kwargs, {'x': 1}),
'including file.html with bar = 2 and baz = 3 and foo = 1'
)
def test_duplicate_key(self):
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_compact "file.html" with foo=1 foo=2 %}'
)
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_verbose "file.html" with 1 as foo and 2 as foo %}'
)
self.assertRaises(
template.TemplateSyntaxError, render,
'{% include_mixed "file.html" with foo=1 2 as foo %}'
)
def test_loop(self):
self.assertEqual(render('|{% for i in "abc" %}{% keywords_echo '
'test=forloop.counter %}|{% endfor %}'),
'|test: 1|test: 2|test: 3|')
|
|
from general.utils import format_precision
from decimal import Decimal
def get_min(grid_cells, field):
min = getattr(grid_cells[0], field)
for gc in grid_cells:
if getattr(gc, field) < min:
min = getattr(gc, field)
return min
def get_max(grid_cells, field):
max = getattr(grid_cells[0], field)
for gc in grid_cells:
if getattr(gc, field) > max:
max = getattr(gc, field)
return max
def get_range(grid_cells, field):
min = getattr(grid_cells[0], field)
max = getattr(grid_cells[0], field)
for gc in grid_cells:
if getattr(gc, field) < min:
min = getattr(gc, field)
if getattr(gc, field) > max:
max = getattr(gc, field)
return min, max
def get_value_count(grid_cells, field, value):
count = 0
for gc in grid_cells:
if getattr(gc, field) == value:
count += 1
return count
def get_sum(grid_cells, field):
sum_val = 0
for gc in grid_cells:
if getattr(gc, field):
sum_val += getattr(gc, field)
return sum_val
def get_average(grid_cells, field):
cell_count = grid_cells.count()
if cell_count == 0:
return 0
sum = get_sum(grid_cells, field)
return sum / cell_count
def get_adjusted_average(grid_cells, field):
cell_count = grid_cells.count()
if cell_count == 0:
return 0
sum_val = Decimal(0)
total_area = Decimal(0)
for gc in grid_cells:
try:
cell_area = gc.geometry.area
cell_val = getattr(gc, field)
sum_val += Decimal(cell_val * Decimal(cell_area))
total_area += Decimal(cell_area)
except:
# In case getattr fails
pass
    if total_area == 0:
        # Avoid dividing by zero when no cell contributed a usable value and area.
        return 0
    return sum_val / total_area
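# The adjusted average above is an area-weighted mean:
#     sum_i(value_i * area_i) / sum_i(area_i)
# so larger grid cells contribute proportionally more than smaller ones.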
def get_unique_values(grid_cells, field):
values = []
for gc in grid_cells:
value = getattr(gc, field)
if value not in values:
values.append(value)
return values
def format_area(value, raw):
if raw:
return str(float(value)) + ' sq mi'
else:
return str(format_precision(float(value), 0)) + ' sq mi'
def get_drawing_summary_reports(grid_cells, attributes, raw=False):
from general.utils import sq_meters_to_sq_miles
if grid_cells.count() == 0:
attributes.append({'title': 'Total Area', 'data': '0 sq mi'})
attributes.append({'title': 'Total Area Closed', 'data': '0 sq mi'})
attributes.append({'title': 'Total Area Reopened', 'data': '0 sq mi'})
attributes.append({'title': 'Soft', 'data': '0 sq mi'})
attributes.append({'title': 'Mixed', 'data': '0 sq mi'})
attributes.append({'title': 'Hard', 'data': '0 sq mi'})
attributes.append({'title': 'Inferred Rock', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 1 for all coral and sponges', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 2 for all coral and sponges', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 3 for all coral and sponges', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 4 for all coral and sponges', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 1 for Scleractinia coral', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 2 for Scleractinia coral', 'data': '0 sq mi'})
attributes.append({'title': 'PHS 3 for Scleractinia coral', 'data': '0 sq mi'})
return
# # Number of Grid Cells
# cell_count = grid_cells.count()
# attributes.append({'title': 'Number of Grid Cells (drawing)', 'data': format(cell_count, ',d')})
#
# Total Area
if not any(x['title'] == 'Total Area' for x in attributes):
title = 'Total Area'
area = sq_meters_to_sq_miles(sum([x.geometry.transform(2163, clone=True).area for x in grid_cells]))
data = format_area(area, raw)
attributes.append({'title': title, 'data': data})
# Depth Range
if not any(x['title'] == 'Depth Range' for x in attributes):
title = 'Depth Range'
min_depth = get_min(grid_cells, 'depth')
max_depth = get_max(grid_cells, 'depth')
depth_range = '%s to %s fathoms' % (format_precision(float(min_depth), 0), format_precision(float(max_depth), 0))
attributes.append({'title': title, 'data': depth_range})
# Mean Depth
if not any(x['title'] == 'Mean Depth' for x in attributes):
title = 'Mean Depth'
mean_depth = get_average(grid_cells, 'depth')
data = str(format_precision(float(mean_depth), 0)) + ' fathoms'
attributes.append({'title': title, 'data': data})
# Soft Substrate (Area)
title = 'Soft'
soft_sub_area = get_sum(grid_cells, 'sft_sub_m2')
data = format_area(soft_sub_area, raw)
attributes.append({'title': title, 'data': data})
# Mixed Substrate (Area)
title = 'Mixed'
mixed_sub_area = get_sum(grid_cells, 'mix_sub_m2')
data = format_area(mixed_sub_area, raw)
attributes.append({'title': title, 'data': data})
# Hard Substrate (Area)
title = 'Hard'
hard_sub_area = get_sum(grid_cells, 'hrd_sub_m2')
data = format_area(hard_sub_area, raw)
attributes.append({'title': title, 'data': data})
# Inferred Rocky Substrate (Area)
title = 'Inferred Rock'
rock_sub_area = get_sum(grid_cells, 'rck_sub_m2')
data = format_area(rock_sub_area, raw)
attributes.append({'title': title, 'data': data})
# Class 1 Suitable Habitat (All)
title = 'PHS 1 for all coral and sponges'
hsall1_m2 = get_sum(grid_cells, 'hsall1_m2')
data = format_area(hsall1_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 2 Suitable Habitat (All)
title = 'PHS 2 for all coral and sponges'
hsall2_m2 = get_sum(grid_cells, 'hsall2_m2')
data = format_area(hsall2_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 3 Suitable Habitat (All)
title = 'PHS 3 for all coral and sponges'
hsall3_m2 = get_sum(grid_cells, 'hsall3_m2')
data = format_area(hsall3_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 4 Suitable Habitat (All)
title = 'PHS 4 for all coral and sponges'
hsall4_m2 = get_sum(grid_cells, 'hsall4_m2')
data = format_area(hsall4_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 1 Suitable Habitat (Scleractinia)
title = 'PHS 1 for Scleractinia coral'
hssclr1_m2 = get_sum(grid_cells, 'hssclr1_m2')
data = format_area(hssclr1_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 2 Suitable Habitat (Scleractinia)
title = 'PHS 2 for Scleractinia coral'
hssclr2_m2 = get_sum(grid_cells, 'hssclr2_m2')
data = format_area(hssclr2_m2, raw)
attributes.append({'title': title, 'data': data})
# Class 3 Suitable Habitat (Scleractinia)
title = 'PHS 3 for Scleractinia coral'
hssclr3_m2 = get_sum(grid_cells, 'hssclr3_m2')
data = format_area(hssclr3_m2, raw)
attributes.append({'title': title, 'data': data})
def get_summary_reports(grid_cells, attributes):
if grid_cells.count() == 0:
return
# Number of Grid Cells
# cell_count = grid_cells.count()
# attributes.append({'title': 'Number of Grid Cells', 'data': format(cell_count, ',d')})
# Depth Range
min_depth = get_min(grid_cells, 'min_fthm')
max_depth = get_max(grid_cells, 'max_fthm')
depth_range = '%s to %s fathoms' % (format_precision(float(min_depth), 0), format_precision(float(max_depth), 0))
attributes.append({'title': 'Depth Range', 'data': depth_range})
# Mean Depth
title = 'Mean Depth'
mean_depth = get_average(grid_cells, 'mean_fthm')
data = str(format_precision(float(mean_depth), 0)) + ' fathoms'
attributes.append({'title': title, 'data': data})
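# Illustrative usage (the queryset/model name below is hypothetical): both report
# helpers mutate the passed-in `attributes` list in place and return None, e.g.
#   attributes = []
#   get_drawing_summary_reports(GridCell.objects.all(), attributes)
#   get_summary_reports(GridCell.objects.all(), attributes)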
|
|
'''
sqlite3 shell modeled after sqlite3 command-line.
Created by: Chris Houser (Briarfox)
Use sqlite ?file? to open a database in the shell.
You can pass params to run one command. ex. sqlite test.db .dump > test.sql
'''
import sqlite3
import os
import cmd
import sys
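# Illustrative sketch (not part of the original script): besides the interactive
# prompt started under __main__ below, SqliteCMD can also be driven
# programmatically. The helper below is hypothetical and never called here; it
# simply shows one statement or dot-command being pushed through the shell.
def _example_run_statement(db_path, statement):
    '''Open db_path in a SqliteCMD shell and execute one statement or dot-command.'''
    shell = SqliteCMD(db_path)      # e.g. 'test.db'
    shell.onecmd(statement)         # e.g. 'SELECT * FROM my_table;' or '.tables'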
class SqliteCMD(cmd.Cmd):
'''
Simple sqlite3 shell
'''
prompt = 'sqlite3>'
def __init__(self, db=None):
cmd.Cmd.__init__(self)
self.database = db or ':memory:'
self.separator = '|'
self.conn = sqlite3.connect(self.database)
self.conn.row_factory = sqlite3.Row
self.cur = self.conn.cursor()
self.commands = []
self.headers = True
self.output = sys.stdout
def preloop(self):
print 'sqlite3 version %s' % sqlite3.sqlite_version
        print '.(dot) is used for all non-SQL commands.'
print 'Use .help for non sqlite command list'
print 'All sql commands must end with ;'
if self.database == ':memory:':
print 'Using database :memory:\nuse .open ?file? to open a database'
else:
            print 'Using database: %s' % self.database
def do_exit(self,*args):
'''Exit shell'''
return True
def emptyline(self):
pass
def command_list(self, command):
if ';' in command:
SqliteCMD.prompt = 'sqlite3>'
self.commands.append(command)
rtn = ' '.join(self.commands)
self.commands = []
return rtn
else:
self.commands.append(command)
SqliteCMD.prompt = '>>>'
return False
def display(self, line):
if self.output == sys.stdout:
print line
else:
with open(self.output, 'a+') as f:
f.write(line+'\n')
def do_output(self, line):
'''.output ?file?
Set output to a file default: stdout'''
self.output = sys.stdout if line == 'stdout' else line
def do_separator(self, separator):
"""Set the separator, default: |"""
self.separator = separator
def do_headers(self,state):
'''.headers ?on|off?
Turn headers on or off, default: on'''
self.headers = state.lower() == 'on'
def do_dump(self, line):
'''.dump ?table?
Dumps a database into a sql string
If table is specified, dump that table.
'''
try:
if not line:
for row in self.conn.iterdump():
self.display(row)
else:
conn = sqlite3.connect(':memory:')
cu = conn.cursor()
cu.execute("attach database '" + self.database + "' as attached_db")
cu.execute("select sql from attached_db.sqlite_master "
"where type='table' and name='" + line + "'")
sql_create_table = cu.fetchone()[0]
cu.execute(sql_create_table);
cu.execute("insert into " + line +
" select * from attached_db." + line)
conn.commit()
cu.execute("detach database attached_db")
self.display("\n".join(conn.iterdump()))
except:
print 'Invalid table specified'
def do_backup(self, line):
'''.backup ?DB? FILE
Backup DB (default "main") to FILE'''
        with open(self.database, 'rb') as f:
with open(line, 'wb') as new_db:
new_db.write(f.read())
def do_clone(self, line):
'''.clone NEWDB
Clone data into NEWDB from the existing database'''
if not os.path.isfile(line):
try:
conn = sqlite3.connect(line)
cur = conn.cursor()
cur.executescript('\n'.join(self.conn.iterdump()))
print "Switched to database: %s" % line
self.conn = conn
self.cur = cur
except sqlite3.Error, e:
print 'There was an error with the clone %s' % e.args[0]
def do_open(self, line):
''' .open ?FILENAME?
Close existing database and reopen FILENAME
'''
        if line:
            # Close the existing connection before switching, as documented above.
            self.conn.close()
            self.database = line
            self.conn = sqlite3.connect(line)
            self.conn.row_factory = sqlite3.Row
            self.cur = self.conn.cursor()
def do_read(self, line):
''' .read FILENAME
Execute SQL in FILENAME
'''
if line:
if os.path.isfile(line):
with open(line,'r') as f:
self.cur.executescript(f.read())
self.conn.commit()
def do_schema(self, line):
''' .schema ?TABLE?
Show the CREATE statements
If TABLE specified, only show tables matching
LIKE pattern TABLE.
'''
try:
res = self.cur.execute("SELECT * FROM sqlite_master ORDER BY name;")
if not line:
for row in res:
self.display(row['sql'])
else:
for row in res:
if row['tbl_name'] == line:
self.display(row['sql'])
except:
pass
def do_tables(self, line):
''' .tables
List names of tables
'''
res = self.cur.execute("SELECT * FROM sqlite_master ORDER BY name;")
self.display(' '.join([a['tbl_name'] for a in res]))
def onecmd(self, line):
"""Mostly ripped from Python's cmd.py"""
if line[:1] == '.':
cmd, arg, line = self.parseline(line[1:])
else:
cmd = None
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def format_print(self, result):
if self.headers:
headers = [header[0] for header in self.cur.description]
self.display(self.separator.join(headers))
for field in result:
self.display(self.separator.join(str(x) for x in field))
def default(self, line):
try:
rtn = self.command_list(line)
if rtn:
self.cur.execute(rtn)
self.conn.commit()
if rtn.lstrip().upper().startswith('SELECT') or rtn.lstrip().upper().startswith('PRAGMA'):
self.format_print(self.cur.fetchall())
except sqlite3.Error, e:
print e
            print 'An error occurred:', e.args[0]
def do_EOF(self, line):
return True
if __name__ == '__main__':
#sqlitedb = SqliteCMD()
if len(sys.argv) == 2:
SqliteCMD(sys.argv[1]).cmdloop()
elif len(sys.argv) > 2:
SqliteCMD(sys.argv[1]).onecmd(sys.argv[2])
else:
SqliteCMD().cmdloop()
|
|
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# arduino_compressed.js: The compressed Arduino generator.
# javascript_compressed.js: The compressed Javascript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# lua_compressed.js: The compressed Lua generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
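#
# Typical invocation (assuming the standard layout, with closure-library checked
# out as a sibling of the Blockly directory): run "python build.py" from the
# Blockly root; the generated *_compressed.js files are written into that
# directory.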
import sys
if sys.version_info[0] != 2:
raise Exception("Blockly build only compatible with Python 2.x.\n"
"You are using: " + sys.version)
import errno, glob, fnmatch, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date.
del sys.path[-1]
return module
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
target_filename = 'blockly_uncompressed.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
typeof window === 'undefined');
if (isNodeJS) {
var window = {};
require('../closure-library/closure/goog/bootstrap/nodejs');
}
window.BLOCKLY_DIR = (function() {
if (!isNodeJS) {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed\.js$');
for (var i = 0, script; script = scripts[i]; i++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
}
return '';
})();
window.BLOCKLY_BOOT = function() {
var dir = '';
if (isNodeJS) {
require('../closure-library/closure/goog/bootstrap/nodejs');
dir = 'blockly';
} else {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'developers.google.com/blockly/hacking/closure');
}
dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
}
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write("goog.require('%s');\n" % provide)
f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
window.BLOCKLY_BOOT()
module.exports = Blockly;
} else {
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script>var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script src="' + window.BLOCKLY_DIR +
'/../closure-library/closure/goog/base.js"></script>');
document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
f.close()
print("SUCCESS: " + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
self.gen_core()
self.gen_blocks()
self.gen_generator("arduino")
self.gen_generator("javascript")
self.gen_generator("python")
self.gen_generator("php")
self.gen_generator("dart")
self.gen_generator("lua")
def gen_core(self):
target_filename = "blockly_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
[os.path.join("core", "blockly.js")])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, [])
def gen_blocks(self):
target_filename = "blocks_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Blocks');"))
params.append(("js_code", "goog.provide('Blockly.Types');"))
filenames = []
for root, folders, files in os.walk("blocks"):
for filename in fnmatch.filter(files, "*.js"):
filenames.append(os.path.join(root, filename))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = ["var Blockly={Blocks:{}};", "Blockly.Types={};"]
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + "_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Generator');"))
params.append(("js_code", "goog.provide('Blockly.StaticTyping');"))
filenames = glob.glob(
os.path.join("generators", language, "*.js"))
filenames.insert(0, os.path.join("generators", language + ".js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
filenames.insert(0, "[goog.provide]")
# Remove Blockly.Generator to be compatible with Blockly.
remove = ["var Blockly={Generator:{}};", "Blockly.StaticTyping={};"]
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("closure-compiler.appspot.com")
conn.request("POST", "/compile", urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith("Input_"):
return "???"
n = int(name[6:]) - 1
return filenames[n]
if json_data.has_key("serverErrors"):
errors = json_data["serverErrors"]
for error in errors:
print("SERVER ERROR: %s" % target_filename)
print(error["error"])
elif json_data.has_key("errors"):
errors = json_data["errors"]
for error in errors:
print("FATAL ERROR")
print(error["error"])
if error["file"]:
print("%s at line %d:" % (
file_lookup(error["file"]), error["lineno"]))
print(error["line"])
print((" " * error["charno"]) + "^")
sys.exit(1)
else:
if json_data.has_key("warnings"):
warnings = json_data["warnings"]
for warning in warnings:
print("WARNING")
print(warning["warning"])
if warning["file"]:
print("%s at line %d:" % (
file_lookup(warning["file"]), warning["lineno"]))
print(warning["line"])
print((" " * warning["charno"]) + "^")
print()
if not json_data.has_key("compiledCode"):
print("FATAL ERROR: Compiler did not return compiledCode.")
sys.exit(1)
code = HEADER + "\n" + json_data["compiledCode"]
for code_statement in remove:
code = code.replace(code_statement, "")
# Trim down Google's Apache licences.
# The Closure Compiler used to preserve these until August 2015.
# Delete this in a few months if the licences don't return.
LICENSE = re.compile("""/\\*
[\w ]+
(Copyright \\d+ Google Inc.)
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 \(the "License"\);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\\*/""")
code = re.sub(LICENSE, r"\n// \1 Apache License 2.0", code)
stats = json_data["statistics"]
original_b = stats["originalSize"]
compressed_b = stats["compressedSize"]
if original_b > 0 and compressed_b > 0:
f = open(target_filename, "w")
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print("SUCCESS: " + target_filename)
print("Size changed from %d KB to %d KB (%d%%)." % (
original_kb, compressed_kb, ratio))
else:
print("UNKNOWN ERROR")
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self):
threading.Thread.__init__(self)
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError as e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print("Source file missing: " + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
print("Error checking file creation times: " + e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if self._rebuild([os.path.join("msg", "messages.js")],
[os.path.join("msg", "json", f) for f in
["en.json", "qqq.json", "synonyms.json"]]):
try:
subprocess.check_call([
"python",
os.path.join("i18n", "js_to_json.py"),
"--input_file", "msg/messages.js",
"--output_dir", "msg/json/",
"--quiet"])
except (subprocess.CalledProcessError, OSError) as e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print("Error running i18n/js_to_json.py: ", e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
"python",
os.path.join("i18n", "create_messages.py"),
"--source_lang_file", os.path.join("msg", "json", "en.json"),
"--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
"--key_file", os.path.join("msg", "json", "keys.json"),
"--output_dir", os.path.join("msg", "js"),
"--quiet"]
json_files = glob.glob(os.path.join("msg", "json", "*.json"))
json_files = [file for file in json_files if not
(file.endswith(("keys.json", "synonyms.json", "qqq.json")))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError) as e:
print("Error running i18n/create_messages.py: ", e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain "json".
f = f.replace("json", "js")
if os.path.isfile(f):
print("SUCCESS: " + f)
else:
print("FAILED to create " + f)
if __name__ == "__main__":
try:
calcdeps = import_path(os.path.join(
os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
except ImportError:
if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
# Dir got renamed when Closure moved from Google Code to GitHub in 2014.
print("Error: Closure directory needs to be renamed from"
"'closure-library-read-only' to 'closure-library'.\n"
"Please rename this directory.")
elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
# When Closure is installed by npm, it is named "google-closure-library".
#calcdeps = import_path(os.path.join(
# os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
print("Error: Closure directory needs to be renamed from"
"'google-closure-library' to 'closure-library'.\n"
"Please rename this directory.")
else:
print("""Error: Closure not found. Read this:
https://developers.google.com/blockly/hacking/closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
["core", os.path.join(os.path.pardir, "closure-library")])
# Run both tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
Gen_uncompressed(search_paths).start()
Gen_compressed(search_paths).start()
# This is run locally in a separate thread.
Gen_langfiles().start()
|
|
#
# cocos2d
# http://cocos2d.org
#
# Particle Engine done by Phil Hassey
# http://www.imitationpickles.org
#
from __future__ import division, print_function, unicode_literals
import six
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from pyglet.gl import *
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from cocos.actions import *
from cocos.sprite import Sprite
import random; rr = random.randrange
class Fire:
def __init__(self,x,y,vy,frame,size):
self.x,self.y,self.vy,self.frame,self.size = x,y,vy,frame,size
class FireManager( Layer ):
def __init__(self, view_width, num):
super( FireManager, self ).__init__()
self.view_width = view_width
self.goodies = []
self.batch = pyglet.graphics.Batch()
self.fimg = pyglet.resource.image('fire.jpg')
self.group = pyglet.sprite.SpriteGroup(self.fimg.texture,
blend_src=GL_SRC_ALPHA, blend_dest=GL_ONE)
self.vertex_list = self.batch.add(4*num, GL_QUADS, self.group,
'v2i', 'c4B', ('t3f', self.fimg.texture.tex_coords*num))
for n in range(0, num):
f = Fire(0,0,0,0,0)
self.goodies.append(f)
self.vertex_list.vertices[n*8:(n+1)*8] = [0, 0, 0, 0, 0, 0, 0, 0]
self.vertex_list.colors[n*16:(n+1)*16] = [0,0,0,0,] * 4
self.schedule( self.step )
def step(self,dt):
w,h = self.fimg.width,self.fimg.height
fires = self.goodies
verts, clrs = self.vertex_list.vertices, self.vertex_list.colors
for n,f in enumerate(fires):
if not f.frame:
f.x = rr(0,self.view_width)
f.y = rr(-120,-80)
f.vy = rr(40,70)/100.0
f.frame = rr(50,250)
                f.size = 8+pow(rr(0, 100)/100.0, 2.0)*32
f.scale= f.size/32.0
x = f.x = f.x+ rr(-50,50)/100.0
y = f.y = f.y+f.vy*4
            c = 3*f.frame/255.0
r,g,b = (min(255,int(c*0xc2)),min(255,int(c*0x41)),min(255,int(c*0x21)))
f.frame -= 1
ww,hh = w*f.scale,h*f.scale
x-=ww/2
if six.PY2:
vs = map(int,[x,y,x+ww,y,x+ww,y+hh,x,y+hh])
else:
vs = list(map(int,[x,y,x+ww,y,x+ww,y+hh,x,y+hh]))
verts[n*8:(n+1)*8] = vs
clrs[n*16:(n+1)*16] = [r,g,b,255] * 4
def draw( self ):
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
class SpriteLayer ( Layer ):
def __init__( self ):
super( SpriteLayer, self ).__init__()
sprite1 = Sprite('grossini.png' )
sprite2 = Sprite('grossinis_sister1.png')
sprite3 = Sprite('grossinis_sister2.png')
sprite1.position = (320,240)
sprite2.position = (620,100)
sprite3.position = (20,100)
self.add( sprite1 )
self.add( sprite2 )
self.add( sprite3 )
ju_right = JumpBy( (600,0), height=100, jumps=4, duration=5 )
ju_left = JumpBy( (-600,0), height=100, jumps=4, duration=5 )
rot1 = Rotate( 180 * 4, duration=5)
sprite1.opacity = 128
sc = ScaleBy( 9, 5 )
rot = Rotate( 180, 5 )
sprite1.do( Repeat( sc + Reverse(sc) ) )
sprite1.do( Repeat( rot + Reverse(rot) ) )
sprite2.do( Repeat( ju_left + Reverse(ju_left) ) )
sprite2.do( Repeat( Reverse(rot1) + rot1 ) )
sprite3.do( Repeat( ju_right + Reverse(ju_right) ) )
sprite3.do( Repeat( rot1 + Reverse(rot1) ) )
class MainMenu(Menu):
def __init__( self ):
# call superclass with the title
super( MainMenu, self ).__init__("GROSSINI'S SISTERS" )
pyglet.font.add_directory('.')
# you can override the font that will be used for the title and the items
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
# you can also override the font size and the colors. see menu.py for
# more info
# example: menus can be vertical aligned and horizontal aligned
self.menu_valign = CENTER
self.menu_halign = CENTER
items = []
items.append( MenuItem('New Game', self.on_new_game ) )
items.append( MenuItem('Options', self.on_options ) )
items.append( MenuItem('Scores', self.on_scores ) )
items.append( MenuItem('Quit', self.on_quit ) )
self.create_menu( items, zoom_in(), zoom_out() )
# Callbacks
def on_new_game( self ):
# director.set_scene( StartGame() )
print("on_new_game()")
def on_scores( self ):
self.parent.switch_to( 2 )
def on_options( self ):
self.parent.switch_to( 1 )
def on_quit( self ):
director.pop()
class OptionMenu(Menu):
def __init__( self ):
super( OptionMenu, self ).__init__("GROSSINI'S SISTERS" )
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
self.menu_valign = BOTTOM
self.menu_halign = RIGHT
items = []
items.append( MenuItem('Fullscreen', self.on_fullscreen) )
items.append( ToggleMenuItem('Show FPS: ', self.on_show_fps, True) )
items.append( MenuItem('OK', self.on_quit) )
self.create_menu( items, shake(), shake_back() )
# Callbacks
def on_fullscreen( self ):
director.window.set_fullscreen( not director.window.fullscreen )
def on_quit( self ):
self.parent.switch_to( 0 )
def on_show_fps( self, value ):
director.show_FPS = value
class ScoreMenu(Menu):
def __init__( self ):
super( ScoreMenu, self ).__init__("GROSSINI'S SISTERS" )
self.font_title['font_name'] = 'You Are Loved'
self.font_title['font_size'] = 72
self.font_item['font_name'] = 'You Are Loved'
self.font_item_selected['font_name'] = 'You Are Loved'
self.menu_valign = BOTTOM
self.menu_halign = LEFT
self.create_menu( [MenuItem('Go Back', self.on_quit)] )
def on_quit( self ):
self.parent.switch_to( 0 )
def init():
director.init( resizable=True, width=640, height=480)
def start():
director.set_depth_test()
firelayer = FireManager(director.get_window_size()[0], 250)
spritelayer = SpriteLayer()
menulayer = MultiplexLayer( MainMenu(), OptionMenu(), ScoreMenu() )
scene =Scene( firelayer, spritelayer, menulayer )
twirl_normal = Twirl( center=(320,240), grid=(16,12), duration=15, twirls=6, amplitude=6 )
twirl = AccelDeccelAmplitude( twirl_normal, rate=4.0 )
lens = Lens3D( radius=240, center=(320,240), grid=(32,24), duration=5)
waves3d = AccelDeccelAmplitude( Waves3D( waves=18, amplitude=80, grid=(32,24), duration=15), rate=4.0 )
flipx = FlipX3D(duration=1)
flipy = FlipY3D(duration=1)
flip = Flip(duration=1)
liquid = Liquid( grid=(16,12), duration=4)
ripple = Ripple3D( grid=(32,24), waves=7, duration=10, amplitude=100, radius=320)
shakyt = ShakyTiles3D( grid=(16,12), duration=3)
corners = CornerSwap( duration=1)
waves = AccelAmplitude(Waves( waves=8, amplitude=50, grid=(32,24), duration=5), rate=2.0)
shaky = Shaky3D( randrange=10, grid=(32,24), duration=5)
quadmove = QuadMoveBy( delta0=(320,240), delta1=(-630,0), delta2=(-320,-240), delta3=(630,0), duration=2 )
fadeout = FadeOutTRTiles( grid=(16,12), duration=2)
cornerup = MoveCornerUp( duration=1)
cornerdown = MoveCornerDown( duration=1)
shatter = ShatteredTiles3D( randrange=16, grid=(16,12), duration=4 )
shuffle = ShuffleTiles( grid=(16,12), duration=1 )
orbit = OrbitCamera( radius=1, delta_radius=2, angle_x=0, delta_x=-90, angle_z=0, delta_z=180, duration=4 )
jumptiles = JumpTiles3D( jumps=2, duration=4, amplitude=80, grid=(16,12) )
wavestiles = WavesTiles3D( waves=3, amplitude=60, duration=8, grid=(16,12) )
turnoff = TurnOffTiles( grid=(16,12), duration=2 )
# firelayer.do(
# spritelayer.do(
# menulayer.do(
scene.do(
Delay(3) +
ripple + Delay(2) +
wavestiles + Delay(1) +
twirl +
liquid + Delay(2) +
shakyt + Delay(2) +
ReuseGrid() +
shuffle + Delay(4) + ReuseGrid() + turnoff + Reverse(turnoff) + Delay(1) +
shatter +
flip+ Delay(2) +
Reverse(flip) +
flipx + Delay(2) + ReuseGrid() +
flipy + Delay(2) + ReuseGrid() +
flipx + Delay(2) + ReuseGrid() +
flipy + Delay(2) +
lens + ReuseGrid() + ( (orbit+Reverse(orbit)) | waves3d) + Delay(1) +
corners + Delay(2) + Reverse(corners) +
waves + Delay(2) + ReuseGrid() + shaky +
jumptiles + Delay(1) +
cornerup + Delay(1) +
Reverse(cornerdown) + Delay(1) +
fadeout + Reverse(fadeout) + Delay(2) +
quadmove + Delay(1) +
Reverse(quadmove) +
StopGrid()
)
scene.do( Delay(10) + OrbitCamera( delta_z=-360*3, duration=10*4 ) )
firelayer.do( Delay(4) + Repeat( RotateBy(360, 10) ) )
return scene
def run(scene):
director.run( scene )
if __name__ == "__main__":
init()
s = start()
run(s)
|
|
import collections
import copy
import datetime
import hashlib
import json
import logging
import os
import re
import shutil
import sys
import time
import urllib.parse as parse
import uuid
import Bio.SeqIO
from Bio.Data import CodonTable
from Bio.Data.CodonTable import TranslationError
from Bio.Seq import Seq
from GenomeFileUtil.core import GenomeUtils
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from GenomeFileUtil.core.GenomeUtils import is_parent, warnings, \
check_full_contig_length_or_multi_strand_feature
from GenomeFileUtil.core.GenomeUtils import propagate_cds_props_to_gene, load_ontology_mappings
from GenomeFileUtil.core.MiscUtils import validate_lists_have_same_elements
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.DataFileUtilClient import DataFileUtil
codon_table = CodonTable.ambiguous_generic_by_name["Standard"]
strand_table = str.maketrans("1?.", "+++")
snake_re = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
MAX_MISC_FEATURE_SIZE = 10000
def make_snake_case(string):
"""Simple function to convert CamelCase to snake_case"""
return snake_re.sub(r'_\1', string).lower()
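# Illustrative conversions performed by make_snake_case:
#   make_snake_case("GeneID")     -> "gene_id"
#   make_snake_case("Parent")     -> "parent"
#   make_snake_case("locus_tag")  -> "locus_tag"  (already snake_case, unchanged)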
class FastaGFFToGenome:
def __init__(self, config):
self.cfg = config
self.au = AssemblyUtil(config.callbackURL)
self.dfu = DataFileUtil(self.cfg.callbackURL)
self.gi = GenomeInterface(self.cfg)
self.taxon_wsname = self.cfg.raw['taxon-workspace-name']
self.time_string = str(datetime.datetime.fromtimestamp(
time.time()).strftime('%Y_%m_%d_%H_%M_%S'))
yml_text = open('/kb/module/kbase.yml').read()
mod_match = re.search(r'module-version:\n\W+(.+)\n', yml_text)
if mod_match:
self.version = mod_match.group(1)
else:
self.version = None
self.ont_mappings = load_ontology_mappings('/kb/module/data')
self.code_table = 11
self.skip_types = ('exon', 'five_prime_UTR', 'three_prime_UTR',
'start_codon', 'stop_codon', 'region', 'chromosome', 'scaffold')
self.spoof_gene_count = 0
self.is_phytozome = False
self.is_metagenome = False
self.strict = True
self.generate_genes = False
self.warnings = [] # type: list
self.feature_dict = collections.OrderedDict() # type: dict
self.cdss = set() # type: set
self.ontologies_present = collections.defaultdict(dict) # type: dict
self.ontology_events = list() # type: list
self.skiped_features = collections.Counter() # type: collections.Counter
self.feature_counts = collections.Counter() # type: collections.Counter
self.re_api_url = config.re_api_url
def warn(self, message):
self.warnings.append(message)
def generate_genome_json(self, params):
# 1) validate parameters
self._validate_import_file_params(params)
self.code_table = params.get('genetic_code', 11)
# 2) construct the input directory staging area
input_directory = os.path.join(self.cfg.sharedFolder, 'fast_gff_upload_' + str(uuid.uuid4()))
os.makedirs(input_directory)
file_paths = self._stage_input(params, input_directory)
# 3) extract out the parameters
params = self._set_parsed_params(params)
if params.get('generate_missing_genes'):
self.generate_genes = True
# 4) do the upload
genome = self._gen_genome_json(params, file_paths["gff_file"], file_paths["fasta_file"])
return genome, input_directory
def import_file(self, params):
self.is_metagenome = params.get('is_metagenome', False)
if self.is_metagenome:
ws_datatype = "KBaseMetagenomes.AnnotatedMetagenomeAssembly"
else:
ws_datatype = "KBaseGenomes.Genome"
genome, input_directory = self.generate_genome_json(params)
json.dump(genome, open(f"{self.cfg.sharedFolder}/{genome['id']}.json", 'w'), indent=4)
result = self.gi.save_one_genome({
'workspace': params['workspace_name'],
'name': params['genome_name'],
'data': genome,
"meta": params.get('metadata', {}),
'workspace_datatype': ws_datatype,
})
feature_types = "\n".join([f"{k}: {v}" for k, v in genome['feature_counts'].items()])
report_string = (
f"A genome with {len(genome['contig_ids'])} contigs and the following feature "
f"types was imported: \n{feature_types}"
)
# XXX report_string is unused except for this log
logging.info(report_string)
# 5) clear the temp directory
shutil.rmtree(input_directory)
# 6) return the result
info = result['info']
prefix = ''
if self.is_metagenome:
prefix = 'meta'
details = {
prefix+'genome_ref': f'{info[6]}/{info[0]}/{info[4]}',
prefix+'genome_info': info
}
return details
def _gen_genome_json(self, params, input_gff_file, input_fasta_file):
# reading in GFF file
features_by_contig = self._retrieve_gff_file(input_gff_file)
contig_ids = set()
# parse feature information
fasta_contigs = Bio.SeqIO.parse(input_fasta_file, "fasta")
logging.info("Scanning contigs")
for contig in fasta_contigs:
molecule_type = str(contig.seq.alphabet).replace(
'IUPACAmbiguous', '').strip('()')
contig_ids.add(contig.id)
for feature in features_by_contig.get(contig.id, []):
self._transform_feature(contig, feature)
for cid in set(features_by_contig.keys()) - contig_ids:
self.warn(f"Sequence name {cid} does not match a sequence id in the FASTA file."
f"{len(features_by_contig[cid])} features will not be imported.")
if self.strict:
raise ValueError("Every feature sequence id must match a fasta sequence id")
prot_fasta_path = f"{self.cfg.sharedFolder}/{params['genome_name']}_protein.fasta"
# if is a metagenome, the following function writes a protein fasta
self._process_cdss(prot_fasta_path)
# save assembly file
'''
Metagenome Changes:
if we want to pass more stuff to AssemblyUtil, do here.
TODO: add flag to save_assembly_from_fasta
'''
if self.is_metagenome:
genome_type = "metagenome"
else:
genome_type = params.get('genome_type', 'isolate')
if params.get('existing_assembly_ref'):
assembly_ref = params['existing_assembly_ref']
ret = self.dfu.get_objects(
{'object_refs': [assembly_ref]}
)['data'][0]
assembly_obj_type = ret['info'][2].split('-')[0]
valid_assembly_types = [
"KBaseGenomeAnnotations.Assembly",
"KBaseGenomes.ContigSet"
]
if assembly_obj_type not in valid_assembly_types:
raise ValueError(f"{assembly_ref} is not a reference to an assembly")
assembly_data = ret['data']
# should do more thorough check of sequences.
if not validate_lists_have_same_elements(
assembly_data['contigs'].keys(),
contig_ids
):
raise ValueError(f"provided assembly with ref {assembly_ref} does not "
"have matching contig ids to provided input fasta.")
logging.info(f"Using supplied assembly: {assembly_ref}")
else:
assembly_ref = self.au.save_assembly_from_fasta(
{'file': {'path': input_fasta_file},
'workspace_name': params['workspace_name'],
'assembly_name': params['genome_name'] + ".assembly",
'type': genome_type,
})
assembly_data = self.dfu.get_objects(
{'object_refs': [assembly_ref],
'ignore_errors': 0})['data'][0]['data']
# generate genome info
genome = self._gen_genome_info(assembly_ref, assembly_data,
input_gff_file, molecule_type,
prot_fasta_path, params)
if self.spoof_gene_count > 0:
self.warn(warnings['spoofed_genome'].format(self.spoof_gene_count))
genome['suspect'] = 1
if self.warnings:
genome['warnings'] = self.warnings
return genome
@staticmethod
def _location(in_feature):
in_feature['strand'] = in_feature['strand'].replace(
"-1", "-").translate(strand_table)
if in_feature['strand'] == '+':
start = in_feature['start']
elif in_feature['strand'] == '-':
start = in_feature['end']
else:
raise ValueError('Invalid feature strand: {}'
.format(in_feature['strand']))
return [
in_feature['contig'],
start,
in_feature['strand'],
in_feature['end'] - in_feature['start'] + 1
]
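    # Illustrative example (hypothetical values) of what _location returns:
    #   {'contig': 'chr1', 'start': 10, 'end': 30, 'strand': '+'} -> ['chr1', 10, '+', 21]
    #   the same span on the '-' strand                           -> ['chr1', 30, '-', 21]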
@staticmethod
def _validate_import_file_params(params):
"""
validate_import_file_params:
validates params passed to FastaGFFToGenome.import_file method
"""
# check for required parameters
for p in ['workspace_name', 'genome_name', 'fasta_file', 'gff_file']:
if p not in params:
raise ValueError(f'"{p}" parameter is required, but missing')
# one and only one of 'path', or 'shock_id' is required
for key in ('fasta_file', 'gff_file'):
file = params[key]
if not isinstance(file, dict):
raise ValueError(f'Required "{key}" field must be a map/dict')
sources = ('path', 'shock_id')
n_valid_fields = sum(1 for f in sources if file.get(f))
logging.info(f"inputs: {n_valid_fields}")
if n_valid_fields < 1:
raise ValueError(f'Required "{key}" field must include one source: '
f'{", ".join(sources)}')
if n_valid_fields > 1:
raise ValueError(f'Required "{key}" field has too many sources specified: '
f'{", ".join(file.keys())}')
if params.get('genetic_code'):
if not (isinstance(params['genetic_code'], int) and 0 < params['genetic_code'] < 32):
raise ValueError("Invalid genetic code specified: {}".format(params))
def _set_parsed_params(self, params):
logging.info('Setting params')
default_params = {
'taxon_wsname': self.cfg.raw['taxon-workspace-name'],
'scientific_name': 'unknown_taxon',
'source': 'User',
'release': None,
'metadata': {},
'source_id': 'unknown',
}
default_params.update(params)
logging.info(json.dumps(default_params, indent=1))
return default_params
def _stage_input(self, params, input_directory):
"""
stage_input: Setup the input_directory by fetching the files and uncompressing if needed
"""
file_paths = dict()
for key in ('fasta_file', 'gff_file'):
file = params[key]
file_path = None
'''
below seems like weird if statement
'''
if file.get('path') is not None:
local_file_path = file['path']
file_path = os.path.join(input_directory, os.path.basename(local_file_path))
logging.info(f'Moving file from {local_file_path} to {file_path}')
# Metagenome Updates
# not sure if we have to be careful about moving the objects
# around
if os.path.isfile(local_file_path):
shutil.copy2(local_file_path, file_path)
else:
raise FileNotFoundError(f"Input {key} file {local_file_path} not found")
err_msg = "Shutil copy unsucessful"
elif file.get('shock_id') is not None:
# handle shock file
logging.info(f'Downloading file from SHOCK node: '
f'{self.cfg.sharedFolder}-{file["shock_id"]}')
sys.stdout.flush()
file_name = self.dfu.shock_to_file({'file_path': input_directory,
'shock_id': file['shock_id']
})['node_file_name']
file_path = os.path.join(input_directory, file_name)
err_msg = "Shock retrieval"
# extract the file if it is compressed
'''
Metagenome Changes:
may have to make check here to see if the the file is too big for
working dir.
'''
if file_path is not None:
logging.info("staged input file =" + file_path)
sys.stdout.flush()
if not os.path.isfile(file_path):
raise FileNotFoundError(f"{file_path} not a file")
dfUtil_result = self.dfu.unpack_file({'file_path': file_path})
file_paths[key] = dfUtil_result['file_path']
err_msg = "DataFielUtil 'unpack_file' function call"
else:
raise ValueError('No valid files could be extracted based on the input')
if not os.path.isfile(file_path):
raise ValueError(f"{err_msg} for {key} file to {file_path}")
return file_paths
def _retrieve_gff_file(self, input_gff_file):
"""
_retrieve_gff_file: retrieve info from gff_file
"""
logging.info("Reading GFF file")
feature_list = collections.defaultdict(list) # type: dict
is_patric = 0
'''
Metagenome Changes:
the lines below iterate through the entire gff input file, which
for a Metagenome may be an issue.
! Only a problem if there are space limits on processing in this
request
'''
logging.info("Parsing GFF file")
for current_line in open(input_gff_file):
if current_line.isspace() or current_line == "" or current_line.startswith("#"):
continue
# Split line
try:
(contig_id, source_id, feature_type, start, end,
score, strand, phase, attributes) = current_line.split('\t')
except ValueError:
raise ValueError(f"unable to parse {current_line}")
''' Do Metagenomes need this phytozome/PATRIC stuff??'''
# Checking to see if Phytozome
if "phytozome" in source_id.lower():
self.is_phytozome = True
            # Checking to see if PATRIC
if "PATRIC" in source_id:
is_patric = True
# PATRIC prepends their contig ids with some gibberish
if is_patric and "|" in contig_id:
contig_id = contig_id.split("|", 1)[1]
# Populating basic feature object
ftr: dict = {'contig': contig_id, 'source': source_id,
'type': feature_type, 'start': int(start),
'end': int(end), 'score': score, 'strand': strand,
'phase': phase, 'attributes': collections.defaultdict(list)}
# Populating with attribute key-value pair
# This is where the feature id is from
for attribute in attributes.split(";"):
attribute = attribute.strip()
# Sometimes empty string
if not attribute:
continue
# Use of 1 to limit split as '=' character can also be made available later
# Sometimes lack of "=", assume spaces instead
if "=" in attribute:
key, value = attribute.split("=", 1)
elif " " in attribute:
key, value = attribute.split(" ", 1)
else:
logging.debug(f'Unable to parse {attribute}')
continue
ftr['attributes'][make_snake_case(key)].append(parse.unquote(value.strip('"')))
ftr['attributes']['raw'] = attributes
if "id" in ftr['attributes']:
ftr['ID'] = ftr['attributes']['id'][0]
if "parent" in ftr['attributes']:
ftr['Parent'] = ftr['attributes']['parent'][0]
feature_list[contig_id].append(ftr)
# Some GFF/GTF files don't use "ID" so we go through the possibilities
feature_list = self._add_missing_identifiers(feature_list)
# Most bacterial files have only CDSs
# In order to work with prokaryotic and eukaryotic gene structure synonymously
# Here we add feature dictionaries representing the parent gene and mRNAs
# feature_list = self._add_missing_parents(feature_list)
# Phytozome has the annoying habit of editing their identifiers so we fix them
if self.is_phytozome:
self._update_phytozome_features(feature_list)
# All identifiers need to be checked so that they follow the same general rules
# Rules are listed within the function itself
feature_list = self._update_identifiers(feature_list)
return feature_list
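    # Illustrative example (hypothetical GFF line) of what the parser above
    # builds for each feature, roughly:
    #   contig1  prokka  CDS  10  400  .  +  0  ID=cds_1;Parent=gene_1
    # becomes
    #   {'contig': 'contig1', 'source': 'prokka', 'type': 'CDS', 'start': 10,
    #    'end': 400, 'score': '.', 'strand': '+', 'phase': '0',
    #    'attributes': {'id': ['cds_1'], 'parent': ['gene_1'], 'raw': '...'},
    #    'ID': 'cds_1', 'Parent': 'gene_1'}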
def _add_missing_identifiers(self, feature_list):
logging.info("Adding missing identifiers")
# General rule is to iterate through a range of possibilities if "ID" is missing
for contig in feature_list:
for i, feat in enumerate(feature_list[contig]):
if "ID" not in feature_list[contig][i]:
# all of the following are not guaranteed to be unique ID's
# for key in ("transcriptid", "proteinid", "pacid",
# "parent", "name", 'transcript_id'):
for key in ("protein_id", "name",
"pacid", "parent"):
if key in feature_list[contig][i]['attributes']:
feature_list[contig][i]['ID'] = feature_list[
contig][i]['attributes'][key][0]
break
if feat['type'] not in self.skip_types:
self.feature_counts[feat['type']] += 1
# If the process fails, throw an error
if "ID" not in feature_list[contig][i]:
feat['ID'] = f"{feat['type']}_{self.feature_counts[feat['type']]}"
return feature_list
def _add_missing_parents(self, feature_list):
# General rule: if a CDS or RNA is missing a parent, create one
for contig in feature_list:
ftrs = feature_list[contig]
new_ftrs = []
for i in range(len(ftrs)):
if ftrs[i]["type"] in self.skip_types:
continue
if "Parent" not in ftrs[i]:
# Assuming parent doesn't exist at all, so create de novo instead of trying to find it
if "RNA" in ftrs[i]["type"] or "CDS" in ftrs[i]["type"]:
new_gene_ftr = copy.deepcopy(ftrs[i])
new_gene_ftr["type"] = "gene"
ftrs[i]["Parent"] = new_gene_ftr["ID"]
new_ftrs.append(new_gene_ftr)
if "CDS" in ftrs[i]["type"]:
new_rna_ftr = copy.deepcopy(ftrs[i])
new_rna_ftr["type"] = "mRNA"
new_ftrs.append(new_rna_ftr)
ftrs[i]["Parent"] = new_rna_ftr["ID"]
new_ftrs.append(ftrs[i])
feature_list[contig] = new_ftrs
return feature_list
@staticmethod
def _update_phytozome_features(feature_list):
# General rule is to use the "Name" field where possible
# And update parent attribute correspondingly
for contig in feature_list:
feature_position_dict = {}
for i in range(len(feature_list[contig])):
# Maintain old_id for reference
# Sometimes ID isn't available, so use PACid
old_id = None
for key in ("id", "pacid"):
if key in feature_list[contig][i]['attributes']:
old_id = feature_list[contig][i]['attributes'][key][0]
break
if old_id is None:
continue
# Retain old_id
feature_position_dict[old_id] = i
# Clip off the increment on CDS IDs so fragments of the same
# CDS share the same ID
if "CDS" in feature_list[contig][i]["ID"]:
feature_list[contig][i]["ID"] = feature_list[contig][i]["ID"].rsplit('.', 1)[0]
# In Phytozome, gene and mRNA have "Name" field, CDS do not
if "name" in feature_list[contig][i]['attributes']:
feature_list[contig][i]["ID"] = feature_list[contig][i]['attributes']['name'][0]
if "Parent" in feature_list[contig][i]:
# Update Parent to match new ID of parent ftr
feature_list[contig][i]["Parent"] = feature_list[contig][
feature_position_dict[feature_list[contig][i]["Parent"]]
]["ID"]
return feature_list
def _update_identifiers(self, feature_list):
# General rules:
# 1) Genes keep identifier
# 2) RNAs keep their identifier only if it's different from the gene's; otherwise "."+type (e.g. ".mRNA") is appended
# 3) CDS always uses RNA identifier with ".CDS" appended
mRNA_parent_dict = dict()
for contig in feature_list:
for ftr in feature_list[contig]:
if ftr["type"] in self.skip_types:
continue
if "Parent" in ftr:
# Retain old_id of parents
old_id = ftr["ID"]
if ftr["ID"] == ftr["Parent"] or "CDS" in ftr["type"]:
ftr["ID"] = ftr["Parent"]+"."+ftr["type"]
# link old to new ids for mRNA to use with CDS
if "RNA" in ftr["type"]:
mRNA_parent_dict[old_id] = ftr["ID"]
return feature_list
def _check_location_order(self, locations):
"""If order looks good return None.
If out of order return a warning.
If on multiple strands return a warning."""
strand = None
last_start = 0
for location in locations:
if strand is None:
strand = location[2]
elif strand != location[2]:
return warnings["both_strand_coordinates"]
if strand == "-":
locations = reversed(locations)
for location in locations:
if last_start > location[1]:
return warnings["out_of_order"]
else:
last_start = location[1]
return None
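# Illustrative sketch (added, not original code), assuming the location tuple
# layout [contig_id, start, strand, length] implied by the indexing above:
#   self._check_location_order([["c1", 10, "+", 50], ["c1", 100, "+", 30]]) -> None
#   self._check_location_order([["c1", 100, "+", 30], ["c1", 10, "+", 50]]) -> warnings["out_of_order"]
#   self._check_location_order([["c1", 10, "+", 50], ["c1", 100, "-", 30]]) -> warnings["both_strand_coordinates"]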
def _create_ontology_event(self, ontology_type):
"""Creates the ontology_event if necessary
Returns the index of the ontology event back."""
if ontology_type not in self.ont_mappings:
raise ValueError("{} is not a supported ontology".format(ontology_type))
if "event_index" not in self.ont_mappings[ontology_type]:
self.ont_mappings[ontology_type]['event_index'] = len(self.ontology_events)
if ontology_type == "GO":
ontology_ref = "KBaseOntology/gene_ontology"
elif ontology_type == "PO":
ontology_ref = "KBaseOntology/plant_ontology"
else:
ontology_ref = f"KBaseOntology/{ontology_type.lower()}_ontology"
self.ontology_events.append({
"method": "GenomeFileUtils Genbank uploader from annotations",
"method_version": self.version,
"timestamp": self.time_string,
"id": ontology_type,
"ontology_ref": ontology_ref
})
return self.ont_mappings[ontology_type]['event_index']
def _get_ontology_db_xrefs(self, feature):
"""Splits the ontology info from the other db_xrefs"""
ontology = collections.defaultdict(dict) # type: dict
db_xrefs = []
# these keys are formatted strangely and require special parsing
for key in ("go_process", "go_function", "go_component"):
ontology_event_index = self._create_ontology_event("GO")
for term in feature.get(key, []):
sp = term.split(" - ")
ontology['GO'][sp[0]] = [ontology_event_index]
self.ontologies_present['GO'][sp[0]] = self.ont_mappings['GO'].get(sp[0], '')
# CATH terms are not distinct from EC numbers so must be found by key
for term in feature.get('cath_funfam', []) + feature.get('cath', []):
for ref in term.split(','):
ontology['CATH'][ref] = [self._create_ontology_event("CATH")]
self.ontologies_present['CATH'][ref] = self.ont_mappings['CATH'].get(ref, '')
search_keys = ['ontology_term', 'db_xref', 'dbxref', 'product_source', 'tigrfam', 'pfam',
'cog', 'go', 'po', 'ko']
ont_terms = [] # type: list
# flatten out into list of values
for key in search_keys:
if key in feature:
ont_terms += [x for y in feature[key] for x in y.split(',')]
for ref in ont_terms:
if ref.startswith('GO:'):
ontology['GO'][ref] = [self._create_ontology_event("GO")]
self.ontologies_present['GO'][ref] = self.ont_mappings['GO'].get(ref, '')
elif ref.startswith('PO:'):
ontology['PO'][ref] = [self._create_ontology_event("PO")]
self.ontologies_present['PO'][ref] = self.ont_mappings['PO'].get(ref, '')
elif ref.startswith('KO:'):
ontology['KO'][ref] = [self._create_ontology_event("KO")]
self.ontologies_present['KO'][ref] = self.ont_mappings['KO'].get(ref, '')
elif ref.startswith('COG'):
ontology['COG'][ref] = [self._create_ontology_event("COG")]
self.ontologies_present['COG'][ref] = self.ont_mappings['COG'].get(ref, '')
elif ref.startswith('PF'):
ontology['PFAM'][ref] = [self._create_ontology_event("PFAM")]
self.ontologies_present['PFAM'][ref] = self.ont_mappings['PFAM'].get(ref, '')
elif ref.startswith('TIGR'):
ontology['TIGRFAM'][ref] = [self._create_ontology_event("TIGRFAM")]
self.ontologies_present['TIGRFAM'][ref] = self.ont_mappings['TIGRFAM'].get(ref, '')
elif ":" not in ref:
db_xrefs.append(tuple(["Unknown_Source", ref]))
else:
db_xrefs.append(tuple(ref.split(":", 1)))
return dict(ontology), db_xrefs
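# Illustrative sketch (added, hypothetical input): for feature attributes
#   {'db_xref': ['GO:0008150', 'UniProt:P12345'], 'pfam': ['PF00069']}
# the prefix dispatch above would produce roughly
#   ontology == {'GO': {'GO:0008150': [<GO event index>]},
#                'PFAM': {'PF00069': [<PFAM event index>]}}
#   db_xrefs == [('UniProt', 'P12345')]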
'''
Metagenome Changes:
okay looks like this might be the real meat of it
'''
def _transform_feature(self, contig, in_feature):
"""Converts a feature from the gff ftr format into the appropriate
format for a genome object """
def _aliases(feat):
keys = ('locus_tag', 'old_locus_tag', 'protein_id',
'transcript_id', 'gene', 'ec_number', 'gene_synonym')
alias_list = []
for key in keys:
if key in feat['attributes']:
alias_list.extend([(key, val) for val in feat['attributes'][key]])
return alias_list
if in_feature['start'] < 1 or in_feature['end'] > len(contig):
self.warn(f"Feature with invalid location for specified contig: {in_feature}")
if self.strict:
raise ValueError("Features must be completely contained within the Contig in the "
f"Fasta file. Feature: in_feature")
return
feat_seq = contig.seq[in_feature['start']-1:in_feature['end']].upper()
if in_feature['strand'] in {'-', '-1'}:
feat_seq = feat_seq.reverse_complement()
# if the feature ID is duplicated (CDS or transpliced gene) we only
# need to update the location and dna_sequence
if in_feature.get('ID') in self.feature_dict:
existing = self.feature_dict[in_feature['ID']]
existing['location'].append(self._location(in_feature))
existing['dna_sequence'] = existing.get('dna_sequence', '') + str(feat_seq)
existing['dna_sequence_length'] = len(existing['dna_sequence'])
return
# The following is common to all the feature types
out_feat = {
"id": in_feature.get('ID'),
"type": in_feature['type'],
"location": [self._location(in_feature)],
"dna_sequence": str(feat_seq),
"dna_sequence_length": len(feat_seq),
"md5": hashlib.md5(str(feat_seq).encode('utf8')).hexdigest(),
"warnings": [],
"flags": [],
}
# add optional fields
if 'note' in in_feature['attributes']:
out_feat['note'] = in_feature['attributes']["note"][0]
ont, db_xrefs = self._get_ontology_db_xrefs(in_feature['attributes'])
if ont:
out_feat['ontology_terms'] = ont
aliases = _aliases(in_feature)
if aliases:
out_feat['aliases'] = aliases
if db_xrefs:
out_feat['db_xrefs'] = db_xrefs
if 'product' in in_feature['attributes']:
out_feat['functions'] = in_feature['attributes']["product"]
if 'product_name' in in_feature['attributes']:
if "functions" in out_feat:
out_feat['functions'].extend(in_feature['attributes']["product_name"])
else:
out_feat['functions'] = in_feature['attributes']["product_name"]
if 'function' in in_feature['attributes']:
out_feat['functional_descriptions'] = in_feature['attributes']["function"]
if 'inference' in in_feature['attributes']:
GenomeUtils.parse_inferences(in_feature['attributes']['inference'])
if 'trans-splicing' in in_feature['attributes'].get('exception', []):
out_feat['flags'].append('trans_splicing')
if 'pseudo' in in_feature['attributes'].get('exception', []):
out_feat['flags'].append('pseudo')
if 'ribosomal-slippage' in in_feature['attributes'].get('exception', []):
out_feat['flags'].append('ribosomal_slippage')
parent_id = in_feature.get('Parent', '')
if parent_id and parent_id not in self.feature_dict:
raise ValueError(f"Parent ID: {parent_id} was not found in feature ID list.")
# if the feature is an exon or UTR, it is only used to update the
# location and sequence of its parent; we add the info to its parent
# feature but not to the feature dict
if in_feature['type'] in self.skip_types:
if parent_id and in_feature['type'] in {'exon', 'five_prime_UTR', 'three_prime_UTR'}:
parent = self.feature_dict[parent_id]
if in_feature['type'] not in parent:
parent[in_feature['type']] = []
parent[in_feature['type']].append(out_feat)
return
# add type specific features
elif 'gene' in in_feature['type']:
out_feat['protein_translation_length'] = 0
out_feat['cdss'] = []
elif in_feature['type'] == 'CDS':
if parent_id:
parent = self.feature_dict[parent_id]
if 'cdss' in parent: # parent must be a gene
if not is_parent(parent, out_feat):
parent["warnings"] = parent.get('warnings', []) + [
warnings["genes_CDS_child_fails_location_validation"].format(out_feat["id"])]
out_feat["warnings"].append(
warnings["CDS_fail_child_of_gene_coordinate_validation"].format(parent_id))
parent['cdss'].append(in_feature['ID'])
out_feat['parent_gene'] = parent_id
else: # parent must be mRNA
if not is_parent(parent, out_feat):
parent["warnings"] = parent.get('warnings', []) + [
warnings["mRNA_fail_parent_coordinate_validation"].format(out_feat["id"])]
out_feat["warnings"].append(
warnings["CDS_fail_child_of_mRNA_coordinate_validation"].format(parent_id))
parent['cds'] = in_feature['ID']
out_feat['parent_mrna'] = parent_id
parent_gene = self.feature_dict[parent['parent_gene']]
parent_gene['cdss'].append(in_feature['ID'])
out_feat['parent_gene'] = parent['parent_gene']
# keep track of CDSs for post processing
self.cdss.add(out_feat['id'])
elif in_feature['type'] == 'mRNA':
if parent_id:
parent = self.feature_dict[parent_id]
if 'mrnas' not in parent:
parent['mrnas'] = []
if 'cdss' in parent: # parent must be a gene
parent['mrnas'].append(in_feature['ID'])
out_feat['parent_gene'] = parent_id
if not is_parent(parent, out_feat):
parent["warnings"] = parent.get('warnings', []) + [
warnings["genes_mRNA_child_fails_location_validation"].format(out_feat["id"])]
out_feat["warnings"].append(
warnings["mRNAs_parent_gene_fails_location_validation"].format(parent_id))
else:
out_feat["type"] = in_feature['type']
# this prevents big misc_features from blowing up the genome size
if out_feat['dna_sequence_length'] > MAX_MISC_FEATURE_SIZE:
del out_feat['dna_sequence']
if parent_id:
parent = self.feature_dict[parent_id]
if 'children' not in parent:
parent['children'] = []
parent['children'].append(out_feat['id'])
out_feat['parent_gene'] = parent_id
if not is_parent(parent, out_feat):
parent["warnings"] = parent.get('warnings', []) + [
warnings["generic_parents_child_fails_location_validation"].format(out_feat["id"])]
out_feat["warnings"].append(
warnings["generic_childs_parent_fails_location_validation"].format(parent_id))
# cleanup empty optional arrays
for key in ['warnings', 'flags']:
if not out_feat[key]:
del out_feat[key]
self.feature_dict[out_feat['id']] = out_feat
def _process_cdss(self, prot_fasta_path):
"""Because CDSs can have multiple fragments, it's necessary to go
back over them to calculate a final protein sequence"""
if self.is_metagenome:
prot_fasta = {} # type: dict
untranslatable_prot = set()
for cds_id in self.cdss:
cds = self.feature_dict[cds_id]
try:
prot_seq = str(Seq(cds['dna_sequence']).translate(
self.code_table, cds=True).strip("*"))
except TranslationError as e:
cds['warnings'] = cds.get('warnings', []) + [str(e)]
# NOTE: we may need a different way of handling this for metagenomes.
prot_seq = ""
if self.is_metagenome:
untranslatable_prot.add(cds_id)
if self.is_metagenome:
if prot_seq != "":
protein_id = ""
if cds.get("aliases"):
aliases = cds['aliases']
for key, val in aliases:
if key == "protein_id":
protein_id = val
if not protein_id:
protein_id = cds['id'] # assign to some default
else:
# log a warning here?
pass
# TODO: update header to reflect what we actually want people
# to see.
if protein_id in prot_fasta:
prot_fasta[protein_id][0] += "|" + cds['id']
else:
fasta_seq_data = ">" + protein_id + " cds_ids:" + cds['id']
prot_fasta[protein_id] = [fasta_seq_data, prot_seq]
else:
pass
else:
cds.update({
"protein_translation": prot_seq,
"protein_md5": hashlib.md5(prot_seq.encode('utf8')).hexdigest(),
"protein_translation_length": len(prot_seq),
})
if 'parent_gene' in cds:
parent_gene = self.feature_dict[cds['parent_gene']]
# propagate selected CDS properties up to the parent gene
propagate_cds_props_to_gene(cds, parent_gene, self.is_metagenome)
elif self.generate_genes:
spoof = copy.copy(cds)
spoof['type'] = 'gene'
spoof['id'] = cds['id']+"_gene"
spoof['cdss'] = [cds['id']]
spoof['warnings'] = [warnings['spoofed_gene'].format(cds['id'])]
self.feature_dict[spoof['id']] = spoof
cds['parent_gene'] = spoof['id']
self.spoof_gene_count += 1
else:
raise ValueError(warnings['no_spoof'])
self.feature_dict[cds['id']] = cds
if self.is_metagenome:
with open(prot_fasta_path, 'w') as fid:
for key, line in prot_fasta.items():
fid.write('\n'.join(line) + '\n')
# do something with 'untranslatable_prot'
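# Note on the translation above (descriptive comment, standard Biopython
# behaviour): with cds=True the sequence must start with a start codon, end
# with a stop codon and contain no internal stops, and the returned protein
# omits the terminal stop, e.g.
#   str(Seq("ATGGCCTAA").translate(11, cds=True)) == "MA"
# Anything violating those rules raises TranslationError, which is what
# populates 'untranslatable_prot' for metagenomes.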
def _update_from_exons(self, feature):
"""This function updates the sequence and location of a feature based
on its UTRs, CDSs and exon information"""
# note that start and end here are in direction of translation
def start(loc):
return loc[0][1]
def end(loc):
if loc[-1][2] == "+":
return loc[-1][1] + loc[-1][3] + 1
else:
return loc[-1][1] - loc[-1][3] - 1
if 'exon' in feature:
# update the feature with the exon locations and sequences
feature['location'] = [x['location'][0] for x in feature['exon']]
feature['dna_sequence'] = "".join(
x['dna_sequence'] for x in feature['exon'])
feature['dna_sequence_length'] = len(feature['dna_sequence'])
# construct feature location from utrs and cdss if present
elif 'cds' in feature:
cds = [copy.deepcopy(self.feature_dict[feature['cds']])]
locs = [] # type: list
seq = ""
for frag in feature.get('five_prime_UTR', []) + cds + \
feature.get('three_prime_UTR', []):
# merge into last location if adjacent
if locs and abs(end(locs) - start(frag['location'])) == 1:
# extend the location length by the length of the first
# location in the fragment
first = frag['location'].pop(0)
locs[-1][3] += first[3]
locs.extend(frag['location'])
seq += frag['dna_sequence']
feature['location'] = locs
feature['dna_sequence'] = seq
feature['dna_sequence_length'] = len(seq)
# remove these properties as they are no longer needed
for x in ['five_prime_UTR', 'three_prime_UTR', 'exon']:
feature.pop(x, None)
else:
raise ValueError(f'Feature {feature["id"]} must contain either exon or cds data to '
'construct an accurate location and sequence')
def _gen_genome_info(self, assembly_ref, assembly, input_gff_file, molecule_type, prot_fasta_path, params):
"""
_gen_genome_info: generate genome info
Here is the meat of the saving operation.
Genome Fields:
features: protein encoding genes
cdss:
mrnas: mrna sequences
non_coding_features: everything that doesn't fall into 'features',
'cdss', 'mrnas'
"""
features = []
cdss = []
mrnas = []
non_coding_features = []
genome = {
"id": params.get('genome_name'),
"scientific_name": params.get('scientific_name', "Unknown"),
"assembly_ref": assembly_ref,
'molecule_type': molecule_type,
"gc_content": assembly["gc_content"],
"dna_size": assembly["dna_size"],
'md5': assembly['md5'],
'num_contigs': len(assembly['contigs']),
'ontologies_present': dict(self.ontologies_present),
'ontology_events': self.ontology_events,
}
if self.is_metagenome:
metagenome_fields = [
("publications", []),
("external_source_origination_date", None),
("original_source_file_name", None),
("notes", None),
# NOTE: in the future environment should use an ontology.
("environment", None),
] # type: list
for field, default in metagenome_fields:
genome[field] = params.get(field, default)
# save protein fasta to shock
prot_to_shock = self.dfu.file_to_shock(
{'file_path': prot_fasta_path, 'make_handle': 1, 'pack': 'gzip'}
)
genome['protein_handle_ref'] = prot_to_shock['handle']['hid']
genome['contig_ids'], genome['contig_lengths'] = zip(
*[(k, v['length']) for k, v in assembly['contigs'].items()])
if self.is_metagenome:
genome['source'], _ = self.gi.determine_tier(params.get('source'))
else:
genome['source'], genome['genome_tiers'] = self.gi.determine_tier(params.get('source'))
# Set taxonomy-related fields in the genome data
if params.get('taxon_id'):
GenomeUtils.set_taxon_data(int(params['taxon_id']), self.re_api_url, genome)
else:
GenomeUtils.set_default_taxon_data(genome)
# handle optional fields
for key in ('release', 'genetic_code', 'genome_type', 'source_id'):
if params.get(key):
genome[key] = params[key]
# Phytozome gff files are not compatible with the RNASeq Pipeline
# so it's better to build from the object than cache the file
if self.is_phytozome or self.is_metagenome:
gff_file_to_shock = self.dfu.file_to_shock(
{'file_path': input_gff_file, 'make_handle': 1, 'pack': "gzip"})
genome['gff_handle_ref'] = gff_file_to_shock['handle']['hid']
logging.info("Scan features")
tot = len(self.feature_dict.values())
ct = 0
for feature in self.feature_dict.values():
if (ct%1000000) == 0:
logging.debug("... %d of %d done" % (ct, tot))
ct += 1
self.feature_counts[feature['type']] += 1
if 'exon' in feature or feature['type'] == 'mRNA':
self._update_from_exons(feature)
# Test whether the locations are in order.
is_transpliced = "flags" in feature and "trans_splicing" in feature["flags"]
if not is_transpliced and len(feature["location"]) > 1:
# Check the order only if not trans_spliced and has more than 1 location.
location_warning = self._check_location_order(feature["location"])
if location_warning is not None:
feature["warnings"] = feature.get('warnings', []) + [location_warning]
contig_id = feature["location"][0][0]
contig_len = assembly['contigs'][contig_id]["length"]
feature = check_full_contig_length_or_multi_strand_feature(
feature, is_transpliced, contig_len, self.skip_types)
# sort features into their respective arrays
if feature['type'] == 'CDS':
if not self.is_metagenome:
del feature['type']
cdss.append(feature)
elif feature['type'] == 'mRNA':
if not self.is_metagenome:
del feature['type']
mrnas.append(feature)
elif feature['type'] == 'gene':
# remove duplicates that may arise from CDS info propagation
for key in ('functions', 'aliases', 'db_xrefs'):
if key in feature:
feature[key] = list(set(feature[key]))
if feature['cdss']:
if not self.is_metagenome:
del feature['type']
self.feature_counts["protein_encoding_gene"] += 1
features.append(feature)
else:
feature.pop('mrnas', None)
feature.pop('cdss', None)
feature.pop('protein_translation_length', None)
self.feature_counts["non_coding_gene"] += 1
non_coding_features.append(feature)
else:
non_coding_features.append(feature)
# if input is metagenome, save features, cdss, non_coding_features, and
# mrnas to shock
if self.is_metagenome:
# TODO: make this section more efficient by editing the above.
metagenome_features = features + cdss + mrnas + non_coding_features
genome['num_features'] = len(metagenome_features)
genome_name = params['genome_name']
json_file_path = f'{self.cfg.sharedFolder}/{genome_name}_features.json'
# save to json files first
with open(json_file_path, 'w') as fid:
json.dump(metagenome_features, fid)
# write json to shock
json_to_shock = self.dfu.file_to_shock(
{'file_path': json_file_path, 'make_handle': 1, 'pack': 'gzip'}
)
self.feature_counts["non_coding_features"] = len(non_coding_features)
genome['features_handle_ref'] = json_to_shock['handle']['hid']
# remove json file to avoid disk overload
os.remove(json_file_path)
# delete python objects to reduce overhead
del metagenome_features
del features, cdss, mrnas, non_coding_features
else:
# TODO determine whether we want to deepcopy here instead of reference.
genome['features'] = features
genome['cdss'] = cdss
genome['mrnas'] = mrnas
genome['non_coding_features'] = non_coding_features
self.feature_counts["non_coding_features"] = len(genome['non_coding_features'])
if self.warnings:
genome['warnings'] = self.warnings
genome['feature_counts'] = dict(self.feature_counts)
return genome
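# Summary of the sorting above (descriptive comment, added for clarity):
#   CDS  -> cdss
#   mRNA -> mrnas
#   gene -> features when it has at least one CDS child, otherwise
#           non_coding_features (with mrnas/cdss/protein_translation_length removed)
#   anything else -> non_coding_features
# For metagenomes these lists are merged, dumped to a JSON file and uploaded
# via DataFileUtil, and only the resulting handle is stored on the object.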
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CountryTranslation'
db.create_table(u'cities_light_country_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['cities_light.Country'])),
))
db.send_create_signal(u'cities_light', ['CountryTranslation'])
# Adding unique constraint on 'CountryTranslation', fields ['name', 'language_code', 'master']
db.create_unique(u'cities_light_country_translation', ['name', 'language_code', 'master_id'])
# Adding unique constraint on 'CountryTranslation', fields ['language_code', 'master']
db.create_unique(u'cities_light_country_translation', ['language_code', 'master_id'])
# Adding model 'Country'
db.create_table(u'cities_light_country', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name_ascii', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, blank=True)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='name_ascii')),
('geoname_id', self.gf('django.db.models.fields.IntegerField')(unique=True, null=True, blank=True)),
('alternate_names', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
('state', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
('default_language', self.gf('django.db.models.fields.CharField')(default='en', max_length=2)),
('code2', self.gf('django.db.models.fields.CharField')(max_length=2, unique=True, null=True, blank=True)),
('code3', self.gf('django.db.models.fields.CharField')(max_length=3, unique=True, null=True, blank=True)),
('continent', self.gf('django.db.models.fields.CharField')(max_length=2, db_index=True)),
('tld', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=5, blank=True)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=20, null=True)),
))
db.send_create_signal(u'cities_light', ['Country'])
# Adding model 'RegionTranslation'
db.create_table(u'cities_light_region_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['cities_light.Region'])),
))
db.send_create_signal(u'cities_light', ['RegionTranslation'])
# Adding unique constraint on 'RegionTranslation', fields ['name', 'language_code', 'master']
db.create_unique(u'cities_light_region_translation', ['name', 'language_code', 'master_id'])
# Adding unique constraint on 'RegionTranslation', fields ['language_code', 'master']
db.create_unique(u'cities_light_region_translation', ['language_code', 'master_id'])
# Adding model 'Region'
db.create_table(u'cities_light_region', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name_ascii', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, blank=True)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='name_ascii')),
('geoname_id', self.gf('django.db.models.fields.IntegerField')(unique=True, null=True, blank=True)),
('alternate_names', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
('state', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
('default_language', self.gf('django.db.models.fields.CharField')(default='en', max_length=2)),
('geoname_code', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'])),
))
db.send_create_signal(u'cities_light', ['Region'])
# Adding unique constraint on 'Region', fields ['country', 'slug']
db.create_unique(u'cities_light_region', ['country_id', 'slug'])
# Adding model 'CityTranslation'
db.create_table(u'cities_light_city_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['cities_light.City'])),
))
db.send_create_signal(u'cities_light', ['CityTranslation'])
# Adding unique constraint on 'CityTranslation', fields ['name', 'language_code', 'master']
db.create_unique(u'cities_light_city_translation', ['name', 'language_code', 'master_id'])
# Adding unique constraint on 'CityTranslation', fields ['language_code', 'master']
db.create_unique(u'cities_light_city_translation', ['language_code', 'master_id'])
# Adding model 'City'
db.create_table(u'cities_light_city', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name_ascii', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, blank=True)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='name_ascii')),
('geoname_id', self.gf('django.db.models.fields.IntegerField')(unique=True, null=True, blank=True)),
('alternate_names', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
('state', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
('default_language', self.gf('django.db.models.fields.CharField')(default='en', max_length=2)),
('search_names', self.gf('cities_light.models.ToSearchTextField')(default='', max_length=4000, blank=True)),
('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=5, blank=True)),
('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=5, blank=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Region'], null=True, blank=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cities_light.Country'])),
('population', self.gf('django.db.models.fields.BigIntegerField')(db_index=True, null=True, blank=True)),
('feature_code', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=10, null=True, blank=True)),
))
db.send_create_signal(u'cities_light', ['City'])
# Adding unique constraint on 'City', fields ['region', 'slug']
db.create_unique(u'cities_light_city', ['region_id', 'slug'])
def backwards(self, orm):
# Removing unique constraint on 'City', fields ['region', 'slug']
db.delete_unique(u'cities_light_city', ['region_id', 'slug'])
# Removing unique constraint on 'CityTranslation', fields ['language_code', 'master']
db.delete_unique(u'cities_light_city_translation', ['language_code', 'master_id'])
# Removing unique constraint on 'CityTranslation', fields ['name', 'language_code', 'master']
db.delete_unique(u'cities_light_city_translation', ['name', 'language_code', 'master_id'])
# Removing unique constraint on 'Region', fields ['country', 'slug']
db.delete_unique(u'cities_light_region', ['country_id', 'slug'])
# Removing unique constraint on 'RegionTranslation', fields ['language_code', 'master']
db.delete_unique(u'cities_light_region_translation', ['language_code', 'master_id'])
# Removing unique constraint on 'RegionTranslation', fields ['name', 'language_code', 'master']
db.delete_unique(u'cities_light_region_translation', ['name', 'language_code', 'master_id'])
# Removing unique constraint on 'CountryTranslation', fields ['language_code', 'master']
db.delete_unique(u'cities_light_country_translation', ['language_code', 'master_id'])
# Removing unique constraint on 'CountryTranslation', fields ['name', 'language_code', 'master']
db.delete_unique(u'cities_light_country_translation', ['name', 'language_code', 'master_id'])
# Deleting model 'CountryTranslation'
db.delete_table(u'cities_light_country_translation')
# Deleting model 'Country'
db.delete_table(u'cities_light_country')
# Deleting model 'RegionTranslation'
db.delete_table(u'cities_light_region_translation')
# Deleting model 'Region'
db.delete_table(u'cities_light_region')
# Deleting model 'CityTranslation'
db.delete_table(u'cities_light_city_translation')
# Deleting model 'City'
db.delete_table(u'cities_light_city')
models = {
u'cities_light.city': {
'Meta': {'unique_together': "(('region', 'slug'),)", 'object_name': 'City', 'index_together': '()'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'default_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'feature_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'population': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Region']", 'null': 'True', 'blank': 'True'}),
'search_names': ('cities_light.models.ToSearchTextField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"}),
'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'cities_light.citytranslation': {
'Meta': {'unique_together': "(('name', 'language_code', 'master'), ('language_code', 'master'))", 'object_name': 'CityTranslation', 'db_table': "u'cities_light_city_translation'", 'index_together': '()'},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['cities_light.City']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
u'cities_light.country': {
'Meta': {'unique_together': '()', 'object_name': 'Country', 'index_together': '()'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'default_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"}),
'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
u'cities_light.countrytranslation': {
'Meta': {'unique_together': "(('name', 'language_code', 'master'), ('language_code', 'master'))", 'object_name': 'CountryTranslation', 'db_table': "u'cities_light_country_translation'", 'index_together': '()'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['cities_light.Country']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'cities_light.region': {
'Meta': {'unique_together': "(('country', 'slug'),)", 'object_name': 'Region', 'index_together': '()'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities_light.Country']"}),
'default_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name_ascii'"}),
'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'cities_light.regiontranslation': {
'Meta': {'unique_together': "(('name', 'language_code', 'master'), ('language_code', 'master'))", 'object_name': 'RegionTranslation', 'db_table': "u'cities_light_region_translation'", 'index_together': '()'},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['cities_light.Region']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['cities_light']
|
|
import bpy
import bpy_extras
import bmesh
import mathutils
import math
#import copy
bl_info = {
"name": "Viewport Vertex Alignment",
"author": "Hades",
"version": (0, 2),
"blender": (2, 6, 9),
"location": "View3D > Mesh Edit > Toolshelf > Vertex Alignment",
"description": "Aligns selected vertices based on a best fit algorithm, to the view port.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
def debug(msg):
#print(msg)
pass
def debug_points(points,rotation):
r=mathutils.Matrix.Rotation(math.radians(rotation),4,'Z')
f=open('c:/blender/points.txt','w')
f.write(str(['x','y','z','w','rx','ry','rz','rw','dx','dy','dz','dw','weight','residual weight']))
f.write('\n')
for p in points:
pr=p['point']*r
f.write(str([p['point'].x,p['point'].y,p['point'].z,p['point'].w,pr.x,pr.y,pr.z,pr.w,p['delta'].x,p['delta'].y,p['delta'].z,p['delta'].w,p['weight'],p['residual weight']]))
f.write('\n')
f.close()
def debug_error(error):
f=open('c:/blender/error.txt','w')
f.write(str(['error sum','mean','stdev','residuals','devs']))
f.write('\n')
for i in range(len(error['residuals'])):
f.write(str([error['error sum'],error['mean'],error['stdev'],error['residuals'][i],error['devs'][i]]))
f.write('\n')
f.close()
def main(context,properties):
#debug("\nVertex Alignment operator:-----")
#import os
#os.system('cls')
obj=bpy.context.object
if (obj.mode == 'EDIT')and(bpy.context.space_data.type=="VIEW_3D") :
bm=bmesh.from_edit_mesh(obj.data)
#debug('\nSelected Vertices:')
vertices = get_vertices(bm)
#debug([v for v in vertices])
if (len(vertices) <= 2):
#debug("mesh.vertex_alignment: Not enough vertices selected")
return {'CANCELLED'}
#debug('\nAxis:')
axis = get_axis('perspective')
#debug(axis)
#debug('\nProjection:')
points = project(vertices,axis)
#debug([p['point'] for p in points])
#debug('\nFit:')
points = fit1(properties,points) #points is being updated by ref-- note the right hand assignment is unnecessary !
#debug([p['delta'] for p in points])
#debug('\nUnproject:')
vertices_updated = unproject(points,axis,properties)
#debug([p["v'"] for p in vertices_updated])
#debug("\nUpdate Vertices:")
update_vertices(obj.data,points)
#debug ("\nend function------")
return {'FINISHED'}
#def filter_outliers(points,rotate,properties,errori):
# """This function will lower the weight of points with residuals that are outside of one standard deviation"""
# print("FILTER_OUTLIERS FUNCTION:")
# print(errori['stdev'])
# if (errori['stdev']>1.0):
# ind=0
# for d in errori['devs']:
# if (math.sqrt(d) >= errori['stdev']):
# points[ind]['weight']=0
# points[ind]['residual weight']=0
# errori['outliers'] += 1
#
# ind+=1
# return points
def filter_anchor(points,rotate,properties,error):
"""This function will add extreme weighting to the boundary points"""
#debug('Anchor: shifting weights')
#this function only works because the fit_functions will sort points before this function is called.
max_weight=10000
points[0]['weight'] = max_weight
points[-1]['weight'] = max_weight
points[0]['residual weight'] = 1
points[-1]['residual weight'] = 1
return points
def filter_reset_weights(points):
for p in points:
p['weight']=1
p['residual weight']=1
return points
#def copy_deltas(points):
# a={}
# for p in points:
# a[p['id']]=p['delta']
# return a
#def paste_deltas(points,best_deltas):
# for d in points['delta']:
def fit1(properties,points):
"""This function applies the fitting function several times, finding the axis rotation that causes the smallest error and returns the points.
This expects a 1D fit where x is the domain, y is the range (and therefore y is being affected in fit)."""
#debug("\nFunction Trial:")
#note: points is being treated as though it's 'by reference'-- inner values such as deltas are being changed on the main object, so be careful on order of operations.
fit_function=properties['function']
iterations=properties['iterations']
max_error=float('inf')
error=[] #list of dictionaries
smallest_error=max_error
min_error=0
#best_points=None
min_theta=0
theta=0
theta_step_initial=45
theta_step=theta_step_initial
theta_forward=True
for i in range(iterations):#angles
anchor=properties['anchor']
#outlier_filter=properties['outlier_filter']
points=filter_reset_weights(points)
try:
error.append({'failed':True,'error sum':max_error,'stdev':0,'mean':max_error,'residuals':[max_error],'devs':[0]}) #'outliers':error[i]['outliers']
while True: #filters
error[i]={'failed':True,'error sum':max_error,'stdev':0,'mean':max_error,'residuals':[max_error],'devs':[0]}
points=fit_function(points,theta,properties)
error[i]={'failed':False,'error sum':0,'stdev':0,'mean':0,'residuals':[],'devs':[]} #reset it-- in case an exception is thrown in the fit_function
SrN=0
for p in points:
error[i]['residuals'].append(
math.pow(
math.sqrt(
math.pow(p['delta'].x,2)+math.pow(p['delta'].y,2)+math.pow(p['delta'].z,2)+math.pow(p['delta'].w,2)
)
,2)*p['residual weight']
)
error[i]['error sum'] += error[i]['residuals'][-1]
SrN += p['residual weight']
N=SrN #len(error[i]['residuals'])
#print(N)
error[i]['mean']=error[i]['error sum'] / N
for e in error[i]['residuals']:
error[i]['devs'].append(math.pow(e - error[i]['mean'],2))
error[i]['stdev'] += error[i]['devs'][-1]
error[i]['stdev'] = math.sqrt(error[i]['stdev']/N)
if (not anchor):#or(outlier_filter)):
break
#if (outlier_filter):
# if ((error[i]['stdev'] <= properties['outlier_filter_target'])or(error[i]['outliers'] >= len(points)-3)): #you need at least 3 points to attempt to describe a curve.
# outlier_filter=False
# else:
# prev_outliers=error[i]['outliers']
# points=filter_outliers(points,theta,properties,error[i])
# print(["IF",prev_outliers,error[i]['outliers'],prev_outliers == error[i]['outliers']])
#
# if (error[i]['outliers'] == prev_outliers): #no more matches were found.
# print("NO MORE OUTLIERS")
# outlier_filter=False
if (anchor):
points=filter_anchor(points,theta,properties,error)
anchor=False
#print([i,theta,outlier_filter,anchor,error[i]['stdev'],error[i]['outliers']])
if (error[i]['error sum'] < smallest_error):
smallest_error=error[i]['error sum']
min_error=i
min_theta=theta
#best_points=copy.copy(points)
except ValueError as e:
print(e)
except ZeroDivisionError as e:
print(e)
#angle convergence:
if (i>360/theta_step_initial): #let it run once around the full circle first, then search for the smallest error
if (theta_forward):
if (error[i]['error sum'] == smallest_error):
theta+=theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step/=2.0
theta-=theta_step
theta_forward=False
else:
if (error[i]['error sum'] == smallest_error):
theta-=theta_step
elif (error[i]['error sum'] > smallest_error):
theta_step/=2.0
theta+=theta_step
theta_forward=True
elif (i == 360/theta_step_initial):
theta=min_theta
theta_step/=2.0
else:
theta+=theta_step
if (theta_step <= 0.000000001): #best angle found (or very close !)
break
#debug_error(error[min_error])
#debug_points(points,min_theta)
#one more time, the full 2-step procedure (1: plain fit, 2: anchored/filtered fit)
anchor=properties['anchor']
points=filter_reset_weights(points)
points=fit_function(points,min_theta,properties)
#if (outlier_filter):
# points=filter_outliers(points,min_theta,properties,error[min_error])
# outlier_filter=False
if (anchor):
points=filter_anchor(points,min_theta,properties,error)
anchor=False
points=fit_function(points,min_theta,properties)
#points=best_points
return points
def error_residual1(points, r , rr, properties, line_func, line_parameters):
"""This function is used in the fitting functions to determine the deltas """
#print("Residual Errors:")
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
yy = line_func(x,line_parameters)
p['delta'] = mathutils.Vector((0,(y - yy),0,0))*rr
return points
def sort_index1(points,r):
"""This function sorts points based on their domain (assumed as x axis when rotated) """
#print("Sorting Indices:")
points = sorted(points, key=lambda xx: (xx['point']*r).x)
return points
def fit_linear1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a linear line: y = a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy += x*y * p['weight']
Sx += x * p['weight']
Sy += y * p['weight']
Sx2 += math.pow(x,2) * p['weight']
Sw += p['weight']
N = Sw
a1 = ( N*Sxy - Sx*Sy ) / ( N*Sx2 - math.pow(Sx,2))
a0 = 1/N * Sy - a1 * 1/N * Sx
def line_func(x,a):
return a[0] + a[1]*x
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1])
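# Worked example (illustrative only, with rotate=0 so the rotation is the
# identity): for unit-weight points (0,0), (1,1), (2,2) the sums are
#   N=3, Sx=3, Sy=3, Sxy=5, Sx2=5
# so a1 = (3*5 - 3*3) / (3*5 - 3**2) = 6/6 = 1 and a0 = (1/3)*3 - 1*(1/3)*3 = 0,
# i.e. the fitted line is y = x and every delta/residual is zero.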
def fit_quadratic1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a quadratic polynomial line: y = a2*x^2 + a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3 = 0
Sx4 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy = Sxy + x*y * p['weight']
Sx = Sx + x * p['weight']
Sy = Sy + y * p['weight']
Sx2 = Sx2 + math.pow(x,2) * p['weight']
Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
Sx3 = Sx3 + math.pow(x,3) * p['weight']
Sx4 = Sx4 + math.pow(x,4) * p['weight']
Sw += p['weight']
N = Sw
A=[[N, Sx, Sx2,Sy], [Sx, Sx2, Sx3,Sxy], [Sx2, Sx3, Sx4,Sx2y]]
xM=like_a_gauss(A)
a0=xM[0][3]
a1=xM[1][3]
a2=xM[2][3]
def line_func(x,a):
return a[0] + a[1]*x + a[2]*math.pow(x,2)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2])
def fit_cubic1(points,rotate,properties=None):
"""This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0"""
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
Sxy = 0
Sx = 0
Sy = 0
Sx2 = 0
Sx2y = 0
Sx3y = 0
Sx3 = 0
Sx4 = 0
Sx5 = 0
Sx6 = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sxy = Sxy + x*y * p['weight']
Sx = Sx + x * p['weight']
Sy = Sy + y * p['weight']
Sx2 = Sx2 + math.pow(x,2) * p['weight']
Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
Sx3y = Sx3y+ math.pow(x,3)*y * p['weight']
Sx3 = Sx3 + math.pow(x,3) * p['weight']
Sx4 = Sx4 + math.pow(x,4) * p['weight']
Sx5 = Sx5 + math.pow(x,5) * p['weight']
Sx6 = Sx6 + math.pow(x,6) * p['weight']
Sw += p['weight']
N = Sw
A=[[N, Sx, Sx2,Sx3,Sy], [Sx, Sx2, Sx3,Sx4,Sxy], [Sx2, Sx3, Sx4, Sx5,Sx2y], [Sx3, Sx4, Sx5, Sx6,Sx3y]]
xM=like_a_gauss(A)
a0=xM[0][4]
a1=xM[1][4]
a2=xM[2][4]
a3=xM[3][4]
def line_func(x,a):
return a[0] + a[1]*x + a[2]*math.pow(x,2) + a[3]*math.pow(x,3)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,a3])
def fit_cosine1(points,rotate,properties):
"""This function attempts to fit a given set of points to a cosine curve: y = a0 + a1*cos(w*x) + a2*cos(w*x) """
r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
omega=properties['cosine_omega']
Sycos = 0
Sysin = 0
Scos = 0
Scos2 = 0
Ssin = 0
Ssin2 = 0
Sy = 0
Scossin = 0
Sw = 0
for p in points:
pr=p['point']*r
x = pr.x
y = pr.y
Sy = Sy + y* p['weight']
Sycos=Sycos + y * math.cos(omega * x)* p['weight']
Sysin=Sysin + y * math.sin(omega * x)* p['weight']
Scos = Scos + math.cos(omega * x)* p['weight']
Ssin = Ssin + math.sin(omega * x)* p['weight']
Scos2= Scos2+ math.pow(math.cos(omega * x),2)* p['weight']
Ssin2= Ssin2+ math.pow(math.sin(omega * x),2)* p['weight']
Scossin= Scossin+ math.cos(omega * x) * math.sin(omega * x)* p['weight']
Sw += p['weight']
N = Sw
A=[[N, Scos, Ssin, Sy], [Scos, Scos2, Scossin, Sycos], [Ssin, Scossin, Ssin2, Sysin]]
xM=like_a_gauss(A)
a0=xM[0][3]
a1=xM[1][3]
a2=xM[2][3]
def line_func(x,a):
return a[0] + a[1]*math.cos(a[3] * x) + a[2] * math.sin(a[3] * x)
points=sort_index1(points,r)
return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,omega])
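# Note (descriptive comment, added for clarity): omega is taken from the
# operator's "cosine_omega" property and is not solved for here; only a0, a1
# and a2 are fitted, which keeps the normal equations linear so they can be
# solved with the same like_a_gauss() elimination used by the polynomial fits.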
def get_vertices(mesh):
"""Returns the active list of selected vertices."""
verts = []
for v in mesh.verts:
if v.select:
verts.append(v)
return verts
def get_axis(type):
"""Gets the axis we will be performing the rotation on. Returns a projection matrix"""
if (type == 'perspective'):
region = bpy.context.region
rv3d = bpy.context.region_data
else:
#debug('mesh.vertex_align: get_axis: Unexpected input')
return None
return {"region":region,"rv3d":rv3d}
def project(vertices,axis):
"""Project the vertices onto a plane of the given axis."""
points = []
for v in vertices:
vec = mathutils.Vector(v.co)
p = bpy_extras.view3d_utils.location_3d_to_region_2d(axis['region'],axis['rv3d'],vec).to_4d()
depth = vec
points.append({"id":v,"point":p,"delta":None,"v'":None,"depth":depth,"weight":1.0,"residual weight":1.0,'index':None}) #id=original vert reference, point=project point on plane, d=delta changes by fit function, v' = Vector of final 3d vert position, depth=depth vector needed for unprojecting, weight=how much a point impacts the fit, residual weight=how much a points varience should be counted in the error.
return points
def unproject(points,axis,properties):
"""Unproject points on a plane to vertices in 3d space."""
for p in points:
new_p = p['point']-p['delta']*properties['influence']
old_v = p['id'].co
new_v = bpy_extras.view3d_utils.region_2d_to_location_3d(axis['region'],axis['rv3d'],new_p.to_2d(),p['depth'])
p["v'"]=new_v
return points
def update_vertices(mesh,points):
"""Update the active set of selected vertices with their fitted positions."""
for p in points:
p['id'].co = p["v'"].to_3d().to_tuple()
bmesh.update_edit_mesh(mesh)
def like_a_gauss(mat):
"""
Implementation of the Gaussian Elimination Algorithm for finding the row-reduced echelon form of a given matrix.
No pivoting is done.
Requires Python 3 due to the different behaviour of the division operation in earlier versions of Python.
Released under the Public Domain (if you want it - you probably don't)
https://gist.github.com/zhuowei/7149445
Changes mat into Reduced Row-Echelon Form.
"""
# Let's do forward step first.
# at the end of this for loop, the matrix is in Row-Echelon format.
for i in range(min(len(mat), len(mat[0]))):
# every iteration, ignore one more row and column
for r in range(i, len(mat)):
# find the first row with a nonzero entry in first column
zero_row = mat[r][i] == 0
if zero_row:
continue
# swap current row with first row
mat[i], mat[r] = mat[r], mat[i]
# add multiples of the new first row to lower rows so the lower
# entries of the first column are zero
first_row_first_col = mat[i][i]
for rr in range(i + 1, len(mat)):
this_row_first = mat[rr][i]
scalarMultiple = -1 * this_row_first / first_row_first_col
for cc in range(i, len(mat[0])):
mat[rr][cc] += mat[i][cc] * scalarMultiple
break
# At the end of the forward step
# Now reduce
for i in range(min(len(mat), len(mat[0])) - 1, -1, -1):
# divide last non-zero row by first non-zero entry
first_elem_col = -1
first_elem = -1
for c in range(len(mat[0])):
if mat[i][c] == 0:
continue
if first_elem_col == -1:
first_elem_col = c
first_elem = mat[i][c]
mat[i][c] /= first_elem
# add multiples of this row so all numbers above the leading 1 are zero
for r in range(i):
this_row_above = mat[r][first_elem_col]
scalarMultiple = -1 * this_row_above
for cc in range(len(mat[0])):
mat[r][cc] += mat[i][cc] * scalarMultiple
# disregard this row and continue
return mat
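# Illustrative usage (added, not original code): solving
#   2x +  y = 5
#    x + 3y = 10
# by reducing the augmented matrix in place:
#   m = [[2.0, 1.0, 5.0], [1.0, 3.0, 10.0]]
#   like_a_gauss(m)   # m is now [[1.0, 0.0, 1.0], [0.0, 1.0, 3.0]], i.e. x=1, y=3
# This is exactly how the fit_* functions above read their coefficients out of
# the last column of the reduced matrix.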
class OPS_MESH_hd_viewport_vertexalign(bpy.types.Operator):
"""Align Vertices based on a least squares algorithm, based on the active view port."""
bl_idname = "mesh.hd_viewport_vertex_align"
bl_label = "3D Viewport Vertex Alignment"
bl_options = {'REGISTER', 'UNDO'}
function = bpy.props.EnumProperty(
items=[('LINEAR1','1D Linear','Linear Least Squares Method'),
('QUADRATIC1','1D Parabolic','Quadratic Polynomial Least Squares Method'),
('CUBIC1','1D Cubic', 'Cubic Polynomial Least Squares Method'),
('COSINE1','1D Cosine', 'Cosine Least Squares Method')],
name="Fit type",
description="Select the method to align the vertices by.",
default='LINEAR1')
cosine_omega = bpy.props.FloatProperty(
name="Omega",
description="Angular frequency",
default=0.01,
min=0.0001,
step=0.001,
soft_min=0.001)
influence = bpy.props.FloatProperty(
name="Influence",
description="How much the best fit solution is applied.",
default=1,
soft_max=1,
soft_min=0,
step=0.01)
#outlier_filter = bpy.props.BoolProperty(
# name="Outlier Filter",
# description="Should vertices that are outside of the standard deviation be filtered out of the fitting function?",
# default=True)
#outlier_filter_target = bpy.props.FloatProperty(
# name="Standard Deviation target for error deviation.",
# description="How far is too far from a fitted line?",
# default=10,
# min=0.1,
# step=0.5)
iterations = bpy.props.IntProperty(
name="Max Iterations",
description="Max number of iterations to try and solve.",
default=180,
soft_max=180,
min=1)
anchor = bpy.props.BoolProperty(
name="Anchor Boundaries",
description="Should the start and end vertices be anchored?",
default=True)
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
if (self.function == 'LINEAR1'):
fit_function=fit_linear1
elif (self.function == 'QUADRATIC1'):
fit_function=fit_quadratic1
elif (self.function == 'CUBIC1'):
fit_function=fit_cubic1
elif (self.function == 'COSINE1'):
fit_function=fit_cosine1
else:
#debug('unexpected input for "function" in mesh.vertex_align')
fit_function=fit_linear1
#,"max_error":math.pow(self.max_error,0.5),,"outlier_filter":self.outlier_filter,"outlier_filter_target":self.outlier_filter_target
properties={"function":fit_function,"cosine_omega":self.cosine_omega,"influence":self.influence,"iterations":self.iterations,"anchor":self.anchor}
return main(context,properties)
class ViewportVertexAlignMenu(bpy.types.Menu):
bl_label = "Vertex Alignment"
bl_idname = "MESH_MT_edit_mesh_hd_viewport_vertex_align"
def draw(self, context):
layout = self.layout
layout.operator("mesh.hd_viewport_vertex_align",text="Linear",icon='CURVE_PATH').function='LINEAR1'
layout.operator("mesh.hd_viewport_vertex_align",text="Parabolic",icon='CURVE_BEZCURVE').function='QUADRATIC1'
layout.operator("mesh.hd_viewport_vertex_align",text="Cubic",icon='CURVE_BEZCURVE').function='CUBIC1'
layout.operator("mesh.hd_viewport_vertex_align",text="Cosine",icon='CURVE_BEZCURVE').function='COSINE1'
def draw_item(self, context):
layout = self.layout
layout.menu(ViewportVertexAlignMenu.bl_idname)
def menu_specials(self, context):
self.layout.menu("MESH_MT_edit_mesh_hd_viewport_vertex_align")
self.layout.separator()
class ViewportVertexAlignPanel(bpy.types.Panel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "3D Viewport Vertex Alignment"
bl_idname = "VIEW3D_PT_hd_viewport_vertex_alignment"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_context = "mesh_edit"
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
col.operator("mesh.hd_viewport_vertex_align")
def register():
bpy.utils.register_class(OPS_MESH_hd_viewport_vertexalign)
bpy.utils.register_class(ViewportVertexAlignMenu)
bpy.utils.register_class(ViewportVertexAlignPanel)
bpy.types.VIEW3D_MT_edit_mesh_specials.prepend(menu_specials)
bpy.types.INFO_HT_header.append(draw_item)
def unregister():
bpy.utils.unregister_class(OPS_MESH_hd_viewport_vertexalign)
bpy.utils.unregister_class(ViewportVertexAlignMenu)
bpy.utils.unregister_class(ViewportVertexAlignPanel)
bpy.types.VIEW3D_MT_edit_mesh_specials.remove(menu_specials)
bpy.types.INFO_HT_header.remove(draw_item)
if __name__ == "__main__":
register()
#unregister()
#register()
# test call
#bpy.ops.wm.call_menu(name=ViewportVertexAlignMenu.bl_idname)
#unregister()
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build NativeClient toolchain packages."""
import logging
import optparse
import os
import sys
import textwrap
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.file_tools
import pynacl.gsd_storage
import pynacl.log_tools
import pynacl.local_storage_cache
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_DIR = os.path.dirname(SCRIPT_DIR)
ROOT_DIR = os.path.dirname(NACL_DIR)
BUILD_DIR = os.path.join(NACL_DIR, 'build')
PKG_VER_DIR = os.path.join(BUILD_DIR, 'package_version')
sys.path.append(PKG_VER_DIR)
import archive_info
import package_info
import once
import command_options
DEFAULT_CACHE_DIR = os.path.join(SCRIPT_DIR, 'cache')
DEFAULT_GIT_CACHE_DIR = os.path.join(SCRIPT_DIR, 'git_cache')
DEFAULT_SRC_DIR = os.path.join(SCRIPT_DIR, 'src')
DEFAULT_OUT_DIR = os.path.join(SCRIPT_DIR, 'out')
def PrintAnnotatorURL(cloud_item):
"""Print an URL in buildbot annotator form.
Args:
cloud_item: once.CloudStorageItem representing a memoized item in the cloud.
"""
if cloud_item.dir_item:
url = cloud_item.dir_item.url
pynacl.log_tools.WriteAnnotatorLine('@@@STEP_LINK@download@%s@@@' % url)
if cloud_item.log_url:
log_url = cloud_item.log_url
pynacl.log_tools.WriteAnnotatorLine('@@@STEP_LINK@log@%s@@@' % log_url)
class PackageBuilder(object):
"""Module to build a setup of packages."""
def __init__(self, packages, package_targets, args):
"""Constructor.
Args:
packages: A dictionary with the following format. There are two types of
packages: source and build (described below).
{
'<package name>': {
'type': 'source',
# Source packages are for sources; in particular remote sources
# where it is not known whether they have changed until they are
# synced (it can also be used for tarballs which need to be
# unpacked). Source package commands are run unconditionally
# unless sync is skipped via the command-line option. Source
# package contents are not memoized.
'dependencies': # optional
[<list of package dependencies>],
'output_dirname': # optional
'<directory name>', # Name of the directory to checkout sources
# into (a subdirectory of the global source directory); defaults
# to the package name.
'commands':
[<list of command.Runnable objects to run>],
'inputs': # optional
{<mapping whose keys are names, and whose values are files or
directories (e.g. checked-in tarballs) used as input. Since
source targets are unconditional, this is only useful as a
convenience for commands, which may refer to the inputs by their
key name>},
},
'<package name>': {
'type': 'build',
# Build packages are memoized, and will build only if their
# inputs have changed. Their inputs consist of the output of
# their package dependencies plus any file or directory inputs
# given by their 'inputs' member.
'dependencies': # optional
[<list of package dependencies>],
'inputs': # optional
{<mapping whose keys are names, and whose values are files or
directories (e.g. checked-in tarballs) used as input>},
'output_subdir': # optional
'<directory name>', # Name of a subdir to be created in the output
# directory, into which all output will be placed. If not present
# output will go into the root of the output directory.
'commands':
[<list of command.Command objects to run>],
},
'<package name>': {
'type': 'work',
# Work packages have the same keys as build packages. However,
# they are intended to be intermediate targets, and are not
# memoized or included for package_version.py. Therefore they will
# always run, regardless of whether their inputs have changed or
# of whether source syncing is skipped via the command line.
<same keys as build-type packages>
},
}
package_targets: A dictionary with the following format. This is a
description of output package targets the packages are
built for. Each output package should contain a list of
<package_name> referenced in the previous "packages"
dictionary. This list of targets is expected to stay
the same from build to build, so it should include
package names even if they aren't being built. A package
target is usually the platform, such as "$OS_$ARCH",
while the output package is usually the toolchain name,
such as "nacl_arm_newlib".
{
'<package_target>': {
'<output_package>':
[<list of package names included in output package>]
}
}
args: sys.argv[1:] or equivalent.
"""
self._packages = packages
self._package_targets = package_targets
self.DecodeArgs(packages, args)
self._build_once = once.Once(
use_cached_results=self._options.use_cached_results,
cache_results=self._options.cache_results,
print_url=PrintAnnotatorURL,
storage=self.CreateStorage(),
extra_paths=self.ExtraSubstitutionPaths())
self._signature_file = None
if self._options.emit_signatures is not None:
if self._options.emit_signatures == '-':
self._signature_file = sys.stdout
else:
self._signature_file = open(self._options.emit_signatures, 'w')
def Main(self):
"""Main entry point."""
pynacl.file_tools.MakeDirectoryIfAbsent(self._options.source)
pynacl.file_tools.MakeDirectoryIfAbsent(self._options.output)
pynacl.log_tools.SetupLogging(
verbose=self._options.verbose,
log_file=self._options.log_file,
quiet=self._options.quiet,
no_annotator=self._options.no_annotator)
self.BuildAll()
self.OutputPackagesInformation()
def GetOutputDir(self, package, use_subdir):
# The output dir of source packages is in the source directory, and can be
# overridden.
if self._packages[package]['type'] == 'source':
dirname = self._packages[package].get('output_dirname', package)
return os.path.join(self._options.source, dirname)
else:
root = os.path.join(self._options.output, package + '_install')
if use_subdir and 'output_subdir' in self._packages[package]:
return os.path.join(root, self._packages[package]['output_subdir'])
return root
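# For illustration (hypothetical package names), given the logic above:
#   a 'source' package 'gcc_src' with output_dirname 'gcc'
#       -> <source>/gcc
#   a 'build' package 'gcc_arm' with output_subdir 'le32-nacl'
#       -> <output>/gcc_arm_install/le32-nacl when use_subdir is True,
#          otherwise <output>/gcc_arm_install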
def BuildPackage(self, package):
"""Build a single package.
Assumes dependencies of the package have been built.
Args:
package: Package to build.
"""
package_info = self._packages[package]
# Validate the package description.
if 'type' not in package_info:
raise Exception('package %s does not have a type' % package)
type_text = package_info['type']
if type_text not in ('source', 'build', 'work'):
raise Exception('package %s has unrecognized type: %s' %
(package, type_text))
is_source_target = type_text == 'source'
is_build_target = type_text == 'build'
if 'commands' not in package_info:
raise Exception('package %s does not have any commands' % package)
# Source targets are the only ones to run when doing sync-only.
if not is_source_target and self._options.sync_sources_only:
logging.debug('Build skipped: not running commands for %s' % package)
return
# Source targets do not run when skipping sync.
if is_source_target and not (
self._options.sync_sources or self._options.sync_sources_only):
logging.debug('Sync skipped: not running commands for %s' % package)
return
pynacl.log_tools.WriteAnnotatorLine(
'@@@BUILD_STEP %s (%s)@@@' % (package, type_text))
logging.debug('Building %s package %s' % (type_text, package))
dependencies = package_info.get('dependencies', [])
# Collect a dict of all the inputs.
inputs = {}
# Add in explicit inputs.
if 'inputs' in package_info:
for key, value in package_info['inputs'].iteritems():
if key in dependencies:
raise Exception('key "%s" found in both dependencies and inputs of '
'package "%s"' % (key, package))
inputs[key] = value
elif type_text != 'source':
# Non-source packages default to a particular input directory.
inputs['src'] = os.path.join(self._options.source, package)
# Add in each dependency by package name.
for dependency in dependencies:
inputs[dependency] = self.GetOutputDir(dependency, True)
# Each package generates intermediate output into output/<PACKAGE>_work,
# which is clobbered here explicitly when --clobber is given.
work_dir = os.path.join(self._options.output, package + '_work')
if self._options.clobber:
logging.debug('Clobbering working directory %s' % work_dir)
pynacl.file_tools.RemoveDirectoryIfPresent(work_dir)
pynacl.file_tools.MakeDirectoryIfAbsent(work_dir)
output = self.GetOutputDir(package, False)
output_subdir = self.GetOutputDir(package, True)
if not is_source_target or self._options.clobber_source:
logging.debug('Clobbering output directory %s' % output)
pynacl.file_tools.RemoveDirectoryIfPresent(output)
os.makedirs(output_subdir)
commands = package_info.get('commands', [])
# Create a command option object specifying current build.
cmd_options = command_options.CommandOptions(
work_dir=work_dir,
clobber_working=self._options.clobber,
clobber_source=self._options.clobber_source,
trybot=self._options.trybot,
buildbot=self._options.buildbot)
# Do it.
self._build_once.Run(
package, inputs, output,
commands=commands,
cmd_options=cmd_options,
working_dir=work_dir,
memoize=is_build_target,
signature_file=self._signature_file,
subdir=output_subdir)
if not is_source_target and self._options.install:
install = pynacl.platform.CygPath(self._options.install)
logging.debug('Installing output to %s' % install)
pynacl.file_tools.CopyTree(output, install)
def BuildOrder(self, targets):
"""Find what needs to be built in what order to build all targets.
Args:
targets: A list of target packages to build.
Returns:
A topologically sorted list of the targets plus their transitive
dependencies, in an order that will allow things to be built.
"""
order = []
order_set = set()
if self._options.ignore_dependencies:
return targets
def Add(target, target_path):
if target in order_set:
return
if target not in self._packages:
raise Exception('Unknown package %s' % target)
next_target_path = target_path + [target]
if target in target_path:
raise Exception('Dependency cycle: %s' % ' -> '.join(next_target_path))
for dependency in self._packages[target].get('dependencies', []):
Add(dependency, next_target_path)
order.append(target)
order_set.add(target)
for target in targets:
Add(target, [])
return order
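# Illustrative (hypothetical) example of the ordering above: for
#   packages = {'a': {}, 'b': {'dependencies': ['a']},
#               'c': {'dependencies': ['a', 'b']}}
# BuildOrder(['c']) returns ['a', 'b', 'c'], while a cycle such as
# 'a' -> 'b' -> 'a' raises the "Dependency cycle" exception.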
def BuildAll(self):
"""Build all packages selected and their dependencies."""
for target in self._targets:
self.BuildPackage(target)
def OutputPackagesInformation(self):
"""Outputs packages information for the built data."""
packages_dir = os.path.join(self._options.output, 'packages')
pynacl.file_tools.RemoveDirectoryIfPresent(packages_dir)
os.makedirs(packages_dir)
built_packages = []
for target, target_dict in self._package_targets.iteritems():
target_dir = os.path.join(packages_dir, target)
pynacl.file_tools.MakeDirectoryIfAbsent(target_dir)
for output_package, components in target_dict.iteritems():
package_desc = package_info.PackageInfo()
include_package = False
for component in components:
if '.' in component:
archive_name = component
else:
archive_name = component + '.tgz'
cache_item = self._build_once.GetCachedCloudItemForPackage(component)
if cache_item is None:
archive_desc = archive_info.ArchiveInfo(name=archive_name)
else:
if cache_item.dir_item:
include_package = True
archive_desc = archive_info.ArchiveInfo(
name=archive_name,
hash=cache_item.dir_item.hash,
url=cache_item.dir_item.url,
log_url=cache_item.log_url)
package_desc.AppendArchive(archive_desc)
# Only output package file if an archive was actually included.
if include_package:
package_file = os.path.join(target_dir, output_package + '.json')
package_desc.SavePackageFile(package_file)
built_packages.append(package_file)
if self._options.packages_file:
packages_file = pynacl.platform.CygPath(self._options.packages_file)
pynacl.file_tools.MakeParentDirectoryIfAbsent(packages_file)
with open(packages_file, 'wt') as f:
f.write('\n'.join(built_packages))
def DecodeArgs(self, packages, args):
"""Decode command line arguments to this build.
Populates self._options and self._targets.
Args:
packages: A dictionary of buildable packages, keyed by package name.
args: sys.argv[1:] or equivalent.
"""
package_list = sorted(packages.keys())
parser = optparse.OptionParser(
usage='USAGE: %prog [options] [targets...]\n\n'
'Available targets:\n' +
'\n'.join(textwrap.wrap(' '.join(package_list))))
parser.add_option(
'-v', '--verbose', dest='verbose',
default=False, action='store_true',
help='Produce more output.')
parser.add_option(
'-q', '--quiet', dest='quiet',
default=False, action='store_true',
help='Produce no output.')
parser.add_option(
'-c', '--clobber', dest='clobber',
default=False, action='store_true',
help='Clobber working directories before building.')
parser.add_option(
'--cache', dest='cache',
default=DEFAULT_CACHE_DIR,
help='Select directory containing local storage cache.')
parser.add_option(
'-s', '--source', dest='source',
default=DEFAULT_SRC_DIR,
help='Select directory containing source checkouts.')
parser.add_option(
'--git-cache', dest='git_cache',
default=DEFAULT_GIT_CACHE_DIR,
help='Select directory containing the git cache for syncing.')
parser.add_option(
'-o', '--output', dest='output',
default=DEFAULT_OUT_DIR,
help='Select directory containing build output.')
parser.add_option(
'--packages-file', dest='packages_file',
default=None,
help='Output packages file describing list of package files built.')
parser.add_option(
'--no-use-cached-results', dest='use_cached_results',
default=True, action='store_false',
help='Do not rely on cached results.')
parser.add_option(
'--no-use-remote-cache', dest='use_remote_cache',
default=True, action='store_false',
help='Do not rely on non-local cached results.')
parser.add_option(
'--no-cache-results', dest='cache_results',
default=True, action='store_false',
help='Do not cache results.')
parser.add_option(
'--no-pinned', dest='pinned',
default=True, action='store_false',
help='Do not use pinned revisions.')
parser.add_option(
'--no-annotator', dest='no_annotator',
default=False, action='store_true',
help='Do not print annotator headings.')
parser.add_option(
'--trybot', dest='trybot',
default=False, action='store_true',
help='Clean source dirs, run and cache as if on trybot.')
parser.add_option(
'--buildbot', dest='buildbot',
default=False, action='store_true',
help='Clean source dirs, run and cache as if on a non-trybot buildbot.')
parser.add_option(
'--clobber-source', dest='clobber_source',
default=False, action='store_true',
help='Clobber source directories before building')
parser.add_option(
'-y', '--sync', dest='sync_sources',
default=False, action='store_true',
help='Run source target commands')
parser.add_option(
'--sync-only', dest='sync_sources_only',
default=False, action='store_true',
help='Run source target commands only')
parser.add_option(
'--disable-git-cache', dest='disable_git_cache',
default=False, action='store_true',
help='Disable git cache when syncing sources')
parser.add_option(
'--emit-signatures', dest='emit_signatures',
help='Write human readable build signature for each step to FILE.',
metavar='FILE')
parser.add_option(
'-i', '--ignore-dependencies', dest='ignore_dependencies',
default=False, action='store_true',
help='Ignore target dependencies and build only the specified target.')
parser.add_option('--install', dest='install',
help='After building, copy contents of build packages' +
' to the specified directory')
parser.add_option('--log-file', dest='log_file',
default=None, action='store',
help='Log all logging into a log file.')
options, targets = parser.parse_args(args)
if options.trybot and options.buildbot:
print >>sys.stderr, (
'ERROR: Tried to run with both --trybot and --buildbot.')
sys.exit(1)
if options.trybot or options.buildbot:
options.verbose = True
options.quiet = False
options.no_annotator = False
options.sync_sources = True
options.clobber = True
options.emit_signatures = '-'
self._options = options
if not targets:
if self._options.ignore_dependencies:
print >>sys.stderr, (
'ERROR: A target must be specified if ignoring target dependencies')
sys.exit(1)
targets = sorted(packages.keys())
targets = self.BuildOrder(targets)
self._targets = targets
def CreateStorage(self):
"""Create a storage object for this build.
Returns:
A storage object (GSDStorage).
"""
if self._options.buildbot:
return pynacl.gsd_storage.GSDStorage(
write_bucket='nativeclient-once',
read_buckets=['nativeclient-once'])
elif self._options.trybot:
return pynacl.gsd_storage.GSDStorage(
write_bucket='nativeclient-once-try',
read_buckets=['nativeclient-once', 'nativeclient-once-try'])
else:
read_buckets = []
if self._options.use_remote_cache:
read_buckets += ['nativeclient-once']
return pynacl.local_storage_cache.LocalStorageCache(
cache_path=self._options.cache,
storage=pynacl.gsd_storage.GSDStorage(
write_bucket=None,
read_buckets=read_buckets))
def ExtraSubstitutionPaths(self):
"""Returns a dictionary of extra substitution paths allowed for commands."""
if self._options.disable_git_cache:
git_cache_dir = ''
else:
git_cache_dir = self._options.git_cache
return {
'top_srcdir': NACL_DIR,
'git_cache_dir': git_cache_dir,
}
|
|
from sqlalchemy import exc
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import relationship
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
class ForUpdateTest(_fixtures.FixtureTest):
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
cls.mapper_registry.map_imperatively(User, users)
def _assert(
self,
read=False,
nowait=False,
of=None,
key_share=None,
assert_q_of=None,
assert_sel_of=None,
):
User = self.classes.User
s = fixture_session()
q = s.query(User).with_for_update(
read=read, nowait=nowait, of=of, key_share=key_share
)
sel = q._compile_state().statement
assert q._for_update_arg.read is read
assert sel._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.nowait is nowait
assert q._for_update_arg.key_share is key_share
assert sel._for_update_arg.key_share is key_share
eq_(q._for_update_arg.of, assert_q_of)
eq_(sel._for_update_arg.of, assert_sel_of)
def test_key_share(self):
self._assert(key_share=True)
def test_read(self):
self._assert(read=True)
def test_plain(self):
self._assert()
def test_nowait(self):
self._assert(nowait=True)
def test_of_single_col(self):
User, users = self.classes.User, self.tables.users
self._assert(
of=User.id, assert_q_of=[users.c.id], assert_sel_of=[users.c.id]
)
class BackendTest(_fixtures.FixtureTest):
__backend__ = True
# test against the major backends. We are naming specific databases
# here rather than using requirements rules since the behavior of
# "FOR UPDATE" as well as "OF" is very specific to each DB, and we need
# to run the query differently based on backend.
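# A rough sketch of the differences exercised here, based on the tests in
# this file:
#   PostgreSQL: supports FOR UPDATE OF <table>, FOR SHARE,
#               FOR NO KEY UPDATE and SKIP LOCKED.
#   Oracle:     FOR UPDATE combined with a LIMIT-style subquery plus an
#               inner joinedload raises ORA-02014.
#   MySQL:      read=True renders LOCK IN SHARE MODE instead of FOR SHARE.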
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
cls.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_inner_joinedload_w_limit(self):
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.options(joinedload(User.addresses, innerjoin=True))
.with_for_update()
.limit(1)
)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_inner_joinedload_wo_limit(self):
User = self.classes.User
sess = fixture_session()
sess.query(User).options(
joinedload(User.addresses, innerjoin=True)
).with_for_update().all()
sess.close()
def test_outer_joinedload_w_limit(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q = q.limit(1)
if testing.against("oracle"):
assert_raises_message(exc.DatabaseError, "ORA-02014", q.all)
else:
q.all()
sess.close()
def test_outer_joinedload_wo_limit(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(
joinedload(User.addresses, innerjoin=False)
)
if testing.against("postgresql"):
q = q.with_for_update(of=User)
else:
q = q.with_for_update()
q.all()
sess.close()
def test_join_w_subquery(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
q1 = sess.query(User).with_for_update().subquery()
sess.query(q1).join(Address).all()
sess.close()
def test_plain(self):
User = self.classes.User
sess = fixture_session()
sess.query(User).with_for_update().all()
sess.close()
class CompileTest(_fixtures.FixtureTest, AssertsCompiledSQL):
"""run some compile tests, even though these are redundant."""
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
Address, addresses = cls.classes.Address, cls.tables.addresses
cls.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_default_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_not_supported_by_dialect_should_just_use_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect=default.DefaultDialect(),
)
def test_postgres_read(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users FOR SHARE",
dialect="postgresql",
)
def test_postgres_read_nowait(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR SHARE NOWAIT",
dialect="postgresql",
)
def test_postgres_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="postgresql",
)
def test_postgres_update_of(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User.id),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(of=User),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_of_entity_list(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
self.assert_compile(
sess.query(User.id, Address.id).with_for_update(
of=[User, Address]
),
"SELECT users.id AS users_id, addresses.id AS addresses_id "
"FROM users, addresses FOR UPDATE OF users, addresses",
dialect="postgresql",
)
def test_postgres_for_no_key_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE",
dialect="postgresql",
)
def test_postgres_for_no_key_nowait_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(key_share=True, nowait=True),
"SELECT users.id AS users_id FROM users FOR NO KEY UPDATE NOWAIT",
dialect="postgresql",
)
def test_postgres_update_of_list(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(
of=[User.id, User.id, User.id]
),
"SELECT users.id AS users_id FROM users FOR UPDATE OF users",
dialect="postgresql",
)
def test_postgres_update_skip_locked(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="postgresql",
)
def test_oracle_update(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(),
"SELECT users.id AS users_id FROM users FOR UPDATE",
dialect="oracle",
)
def test_oracle_update_skip_locked(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(skip_locked=True),
"SELECT users.id AS users_id FROM users FOR UPDATE SKIP LOCKED",
dialect="oracle",
)
def test_mysql_read(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User.id).with_for_update(read=True),
"SELECT users.id AS users_id FROM users LOCK IN SHARE MODE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
"AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users LIMIT %s FOR UPDATE) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect="mysql",
)
def test_for_update_on_inner_w_joinedload_no_render_oracle(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User)
.options(joinedload(User.addresses))
.with_for_update()
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT anon_2.users_id AS users_id, "
"anon_2.users_name AS users_name FROM "
"(SELECT users.id AS users_id, users.name AS users_name "
"FROM users) anon_2 WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"LEFT OUTER JOIN addresses addresses_1 "
"ON anon_1.users_id = addresses_1.user_id FOR UPDATE",
dialect="oracle",
)
|
|
from unittest import TestCase
from surveygizmo import SurveyGizmo
client = SurveyGizmo(
base_url='',
api_token='token',
api_token_secret='secret',
prepare_url=True,
)
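# Note: with prepare_url=True the client is not expected to issue real HTTP
# requests; each resource call should return a (path, params) pair, which is
# what the tests below unpack and assert against. This reading is inferred
# from the tests themselves rather than from the surveygizmo documentation.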
class AccountTests(TestCase):
resource = client.api.account
def test_list(self):
with self.assertRaises(NotImplementedError):
self.resource.list()
def test_get(self):
path, params = self.resource.get()
self.assertEqual(path, 'head/account/')
def test_create(self):
with self.assertRaises(NotImplementedError):
self.resource.create()
def test_update(self):
with self.assertRaises(NotImplementedError):
self.resource.update()
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
with self.assertRaises(NotImplementedError):
self.resource.delete()
class AccountTeamsTests(TestCase):
resource = client.api.accountteams
def test_list(self):
path, params = self.resource.list()
self.assertEqual(path, 'head/accountteams/')
def test_get(self):
path, params = self.resource.get(1)
self.assertEqual(path, 'head/accountteams/1')
def test_create(self):
path, params = self.resource.create('team')
self.assertEqual(path, 'head/accountteams/')
self.assertEqual(params['teamname'], 'team')
def test_update(self):
path, params = self.resource.update(1)
self.assertEqual(path, 'head/accountteams/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1)
self.assertEqual(path, 'head/accountteams/1')
class AccountUserTests(TestCase):
resource = client.api.accountuser
def test_list(self):
path, params = self.resource.list()
self.assertEqual(path, 'head/accountuser/')
def test_get(self):
path, params = self.resource.get(1)
self.assertEqual(path, 'head/accountuser/1')
def test_create(self):
path, params = self.resource.create('user@example.com')
self.assertEqual(path, 'head/accountuser/')
self.assertEqual(params['email'], 'user@example.com')
def test_update(self):
path, params = self.resource.update(1)
self.assertEqual(path, 'head/accountuser/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1)
self.assertEqual(path, 'head/accountuser/1')
class ContactTests(TestCase):
resource = client.api.contact
def test_list(self):
path, params = self.resource.list(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/contact/')
def test_get(self):
path, params = self.resource.get(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/contact/1')
def test_create(self):
path, params = self.resource.create(1, 1, 'user@example.com')
self.assertEqual(path, 'head/survey/1/surveycampaign/1/contact/')
self.assertEqual(params['semailaddress'], 'user@example.com')
def test_update(self):
path, params = self.resource.update(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/contact/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/contact/1')
class ContactListTests(TestCase):
resource = client.api.contactlist
def test_list(self):
path, params = self.resource.list()
self.assertEqual(path, 'head/contactlist/')
def test_get(self):
path, params = self.resource.get(1)
self.assertEqual(path, 'head/contactlist/1')
def test_create(self):
path, params = self.resource.create('Contact List')
self.assertEqual(path, 'head/contactlist/')
self.assertEqual(params['listname'], 'Contact List')
def test_update(self):
path, params = self.resource.update(1, 'user@example.com')
self.assertEqual(path, 'head/contactlist/1')
self.assertEqual(params['semailaddress'], 'user@example.com')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
with self.assertRaises(NotImplementedError):
self.resource.delete()
class EmailMessageTests(TestCase):
resource = client.api.emailmessage
def test_list(self):
path, params = self.resource.list(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/emailmessage/')
def test_get(self):
path, params = self.resource.get(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/emailmessage/1')
def test_create(self):
path, params = self.resource.create(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/emailmessage/')
def test_update(self):
path, params = self.resource.update(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/emailmessage/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1/emailmessage/1')
class SurveyTests(TestCase):
resource = client.api.survey
def test_list(self):
path, params = self.resource.list()
self.assertEqual(path, 'head/survey/')
def test_get(self):
path, params = self.resource.get(1)
self.assertEqual(path, 'head/survey/1')
def test_create(self):
path, params = self.resource.create('My Survey', 'poll')
self.assertEqual(path, 'head/survey/')
self.assertEqual(params['title'], 'My Survey')
self.assertEqual(params['type'], 'poll')
def test_update(self):
path, params = self.resource.update(1)
self.assertEqual(path, 'head/survey/1')
def test_copy(self):
path, params = self.resource.copy(1)
self.assertEqual(path, 'head/survey/1')
def test_delete(self):
path, params = self.resource.delete(1)
self.assertEqual(path, 'head/survey/1')
class SurveyCampaignTests(TestCase):
resource = client.api.surveycampaign
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveycampaign/')
def test_get(self):
path, params = self.resource.get(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1')
def test_create(self):
path, params = self.resource.create(1, 'My Campaign', 'email')
self.assertEqual(path, 'head/survey/1/surveycampaign/')
self.assertEqual(params['name'], 'My Campaign')
self.assertEqual(params['type'], 'email')
def test_update(self):
path, params = self.resource.update(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1')
def test_copy(self):
path, params = self.resource.copy(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1')
def test_delete(self):
path, params = self.resource.delete(1, 1)
self.assertEqual(path, 'head/survey/1/surveycampaign/1')
class SurveyOptionTests(TestCase):
resource = client.api.surveyoption
def test_list(self):
path, params = self.resource.list(1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1/surveyoption/')
def test_get(self):
path, params = self.resource.get(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1/surveyoption/1')
def test_create(self):
path, params = self.resource.create(1, 1, 'Option', 'Value')
self.assertEqual(path, 'head/survey/1/surveyquestion/1/surveyoption/')
self.assertEqual(params['title'], 'Option')
self.assertEqual(params['value'], 'Value')
def test_update(self):
path, params = self.resource.update(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1/surveyoption/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1/surveyoption/1')
class SurveyPageTests(TestCase):
resource = client.api.surveypage
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveypage/')
def test_get(self):
path, params = self.resource.get(1, 1)
self.assertEqual(path, 'head/survey/1/surveypage/1')
def test_create(self):
path, params = self.resource.create(1, 'Page 1')
self.assertEqual(path, 'head/survey/1/surveypage/')
self.assertEqual(params['title'], 'Page 1')
def test_update(self):
path, params = self.resource.update(1, 1)
self.assertEqual(path, 'head/survey/1/surveypage/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1)
self.assertEqual(path, 'head/survey/1/surveypage/1')
class SurveyQuestionTests(TestCase):
resource = client.api.surveyquestion
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveyquestion/')
def test_get(self):
path, params = self.resource.get(1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1')
def test_create(self):
path, params = self.resource.create(1, 1)
self.assertEqual(path, 'head/survey/1/surveypage/1/surveyquestion/')
def test_update(self):
path, params = self.resource.update(1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1)
self.assertEqual(path, 'head/survey/1/surveyquestion/1')
class SurveyReportTests(TestCase):
resource = client.api.surveyreport
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveyreport/')
def test_get(self):
path, params = self.resource.get(1, 1)
self.assertEqual(path, 'head/survey/1/surveyreport/1')
def test_create(self):
with self.assertRaises(NotImplementedError):
self.resource.create()
def test_update(self):
path, params = self.resource.update(1, 1)
self.assertEqual(path, 'head/survey/1/surveyreport/1')
self.assertEqual(params['copy'], 'false')
def test_copy(self):
path, params = self.resource.copy(1, 1)
self.assertEqual(path, 'head/survey/1/surveyreport/1')
self.assertEqual(params['copy'], 'true')
def test_delete(self):
path, params = self.resource.delete(1, 1)
self.assertEqual(path, 'head/survey/1/surveyreport/1')
class SurveyResponseTests(TestCase):
resource = client.api.surveyresponse
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveyresponse/')
def test_get(self):
path, params = self.resource.get(1, 1)
self.assertEqual(path, 'head/survey/1/surveyresponse/1')
def test_create(self):
path, params = self.resource.create(1)
self.assertEqual(path, 'head/survey/1/surveyresponse/')
def test_update(self):
path, params = self.resource.update(1, 1)
self.assertEqual(path, 'head/survey/1/surveyresponse/1')
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
path, params = self.resource.delete(1, 1)
self.assertEqual(path, 'head/survey/1/surveyresponse/1')
class SurveyStatisticTests(TestCase):
resource = client.api.surveystatistic
def test_list(self):
path, params = self.resource.list(1)
self.assertEqual(path, 'head/survey/1/surveystatistic/')
def test_get(self):
with self.assertRaises(NotImplementedError):
self.resource.get()
def test_create(self):
with self.assertRaises(NotImplementedError):
self.resource.create()
def test_update(self):
with self.assertRaises(NotImplementedError):
self.resource.update()
def test_copy(self):
with self.assertRaises(NotImplementedError):
self.resource.copy()
def test_delete(self):
with self.assertRaises(NotImplementedError):
self.resource.delete()
|
|
import socket
from unittest import mock
import pytest
from aiohttp.streams import CORK, StreamWriter
has_ipv6 = socket.has_ipv6
if has_ipv6:
# The socket.has_ipv6 flag may be True if Python was built with IPv6
# support, but the target system still may not have it.
# So let's ensure that we really have IPv6 support.
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
except OSError:
has_ipv6 = False
# nodelay
def test_nodelay_default(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_no_change(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(False)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_exception(loop):
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
assert not writer.tcp_nodelay
def test_set_nodelay_enable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_enable_and_disable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
writer.set_tcp_nodelay(False)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
def test_set_nodelay_enable_ipv6(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
def test_set_nodelay_enable_unix(loop):
# do not set nodelay for unix socket
transport = mock.Mock()
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
assert not writer.tcp_nodelay
def test_set_nodelay_enable_no_socket(loop):
transport = mock.Mock()
transport.get_extra_info.return_value = None
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
assert not writer.tcp_nodelay
assert writer._socket is None
# cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_cork_default(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_no_change(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(False)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_and_disable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
writer.set_tcp_cork(False)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_ipv6(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_unix(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
assert not writer.tcp_cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_no_socket(loop):
transport = mock.Mock()
transport.get_extra_info.return_value = None
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
assert not writer.tcp_cork
assert writer._socket is None
def test_set_cork_exception(loop):
transport = mock.Mock()
s = mock.Mock()
s.setsockopt = mock.Mock()
s.family = socket.AF_INET
s.setsockopt.side_effect = OSError
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
assert not writer.tcp_cork
# cork and nodelay interference
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_enabling_cork_disables_nodelay(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_nodelay(True)
writer.set_tcp_cork(True)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_enabling_nodelay_disables_cork(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
writer = StreamWriter(proto, transport, loop)
writer.set_tcp_cork(True)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
def test_acquire(loop):
transport = mock.Mock()
writer = StreamWriter(mock.Mock(), transport, loop)
assert writer.available
acquired = None
def cb(tr):
nonlocal acquired
acquired = tr
writer.acquire(cb)
assert not writer.available
assert acquired is transport
def test_release(loop):
transport = mock.Mock()
writer = StreamWriter(mock.Mock(), transport, loop)
writer.available = False
acquired = None
def cb(tr):
nonlocal acquired
acquired = tr
writer.acquire(cb)
assert not writer.available
assert acquired is None
writer.release()
assert not writer.available
assert acquired is transport
writer.release()
assert writer.available
|
|
# File: CeReports.py ; This file is part of Twister.
# version: 3.014
# Copyright (C) 2012-2014 , Luxoft
# Authors:
# Andrei Costachi <acostachi@luxoft.com>
# Cristi Constantin <crconstantin@luxoft.com>
# Daniel Cioata <dcioata@luxoft.com>
# Mihai Tudoran <mtudoran@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the Reporting Server.
It is used to view the results of the test executions.
The reports can be fully customized, by editing the DB.xml file.
The INDEX/ HOME links force the reload of the DB.xml file,
the rest of the links just use the cached data, from last reload.
"""
import os
import sys
import re
import time
import datetime
import json
import mako
import MySQLdb
import cherrypy
from collections import OrderedDict
from mako.template import Template
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
print('\n$TWISTER_PATH environment variable is not set! Exiting!\n')
exit(1)
if TWISTER_PATH not in sys.path:
sys.path.append(TWISTER_PATH)
from common.helpers import userHome
from common.tsclogging import logDebug, logInfo, logWarning, logError
from common.xmlparser import DBParser
if mako.__version__ < '0.7':
logWarning('Warning! Mako-template version `{}` is old! Some pages might crash!\n'.format(mako.__version__))
class ReportingServer(object):
"""
Reporting server class.
"""
db_parser = {}
db_servers = {}
glob_fields = {}
glob_reports = {}
glob_redirects = {}
glob_links = {}
timers = {}
def __init__(self, project):
"""
Initialization function.
"""
self.project = project
def load_config(self, usr, force=False):
"""
Read the DB.xml config file for a single user.
"""
if not os.path.isdir(userHome(usr) + '/twister/config'):
logError('Report Server: Cannot find Twister for user `{}` !'.format(usr))
return False
# Get the path to DB.XML
db_file = self.project.get_user_info(usr, 'db_config')
if not db_file:
logError('Report Server: Null DB.XML file for user `{}`! Nothing to do!'.format(usr))
return False
# Current timer
c_time = time.time()
# Create the database parser if necessary, if forced, or if the cached data is stale (older than 5 seconds)
if force or (usr not in self.db_parser) or (c_time - self.timers.get(usr, 0) > 5.0):
# logDebug('Rebuilding fields, reports and redirects for user `{}`...'.format(usr))
self.timers[usr] = c_time
self.db_parser[usr] = True
self.glob_fields[usr] = OrderedDict()
self.glob_reports[usr] = OrderedDict()
self.glob_redirects[usr] = OrderedDict()
self.glob_links[usr] = [{'name': 'Home', 'link': 'Home', 'type': 'link'}]
# DB.xml + Shared DB parser
users_groups = self.project._parse_users_and_groups()
shared_db_path = users_groups['shared_db_cfg']
db_cfg_role = 'CHANGE_DB_CFG' in users_groups['users'][usr]['roles']
# Use shared DB or not ?
use_shared_db = self.project.get_user_info(usr, 'use_shared_db')
if use_shared_db and use_shared_db.lower() in ['true', 'yes']:
use_shared_db = True
else:
use_shared_db = False
dbp = DBParser(usr, db_file, shared_db_path, use_shared_db)
self.db_servers = dbp.db_config['servers']
report_queries = dbp.get_reports(db_cfg_role)
del dbp
for host_db in report_queries:
self.glob_fields[usr].update(report_queries[host_db]['fields'])
self.glob_redirects[usr].update(report_queries[host_db]['redirects'])
for report_name, report_data in report_queries[host_db]['reports'].iteritems():
srv_name = report_data['srv_name']
# Add the new server name in reports ?
if srv_name not in self.glob_reports[usr]:
self.glob_reports[usr][srv_name] = OrderedDict()
# Add the report inside the server
self.glob_reports[usr][srv_name][report_name] = report_data
# There are several types of reports:
# - normal links, like Home, Help and other regular reports
# - redirect links, which don't contain reports
# - folders, which don't go anywhere; they are just labels for grouping reports
for rname, rval in report_queries[host_db]['reports'].iteritems():
# Shared report ?
srv_name = rval['srv_name']
# Link name used in reports
link = ('S&' if srv_name=='Shared' else 'U&') + rname
# Server, user, password
db_server, db_name, db_user, _, _ = host_db
srv_db = '{} server: {}/ {}/ {}'.format(srv_name, db_server, db_name, db_user)
report = {
'name': rname,
'link': link,
'type': 'link',
'folder': rval.get('folder', ''),
'srvr': srv_db
}
self.glob_links[usr].append(report)
for rname, rval in report_queries[host_db]['redirects'].iteritems():
link = ('S&' if rval['srv_name']=='Shared' else 'U&') + rname
self.glob_links[usr].append({
'name': rname,
'link': link,
'type': 'redir',
'folder': ''
})
# Append the Help link at the end
self.glob_links[usr].append({'name': 'Help', 'link': 'Help', 'type': 'link'})
return True
@cherrypy.expose
def index(self, usr=''):
"""
The index page.
"""
if not usr:
users = self.project.list_users()
output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
return output.render(title='Users', usr='#' + '#'.join(users), links=[])
if not os.path.isdir(userHome(usr) + '/twister/config'):
return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
# FORCE re-load all Database XML on INDEX/ HOME links !
self.load_config(usr, True)
output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
return output.render(title='Home', usr=usr, links=self.glob_links[usr])
# Report link 2
@cherrypy.expose
def home(self, usr=''):
""" Get reporting link 2 """
return self.index(usr=usr)
# Report link 3
@cherrypy.expose
def report(self, usr=''):
""" Get reporting link 3 """
return self.index(usr=usr)
# Report link 4
@cherrypy.expose
def reporting(self, usr=''):
""" Get reporting link 4 """
return self.index(usr=usr)
@cherrypy.expose
def help(self, usr=''):
"""
Help page.
"""
if not usr:
return '<br><b>Error! This link should be accessed by passing a username, eg: /help/some_user<b/>'
if not os.path.isdir(userHome(usr) + '/twister/config'):
return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
self.load_config(usr) # Re-load all Database XML
output = Template(filename=TWISTER_PATH + '/server/template/rep_help.htm')
return output.render(title='Help', usr=usr, links=self.glob_links[usr])
@cherrypy.expose
def rep(self, report=None, usr=None, **kw):
"""
Reporting link.
"""
if not usr:
return '<br><b>Error! This link should be accessed by passing a username, eg: /rep/some_user<b/>'
if not os.path.isdir(userHome(usr) + '/twister/config'):
return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
self.load_config(usr) # Re-load all Database XML
cherrypy.response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
cherrypy.response.headers['Pragma'] = 'no-cache'
cherrypy.response.headers['Expires'] = 0
if not report:
raise cherrypy.HTTPRedirect('/error')
# The report name is like "U&..." or "S&..."
rlink = report
shared_db, report = rlink[0], rlink[2:]
if shared_db == 'S':
shared_db = True
srv_name = 'Shared'
else:
shared_db = False
srv_name = 'User'
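# For example (hypothetical report name): rlink = 'S&Weekly summary' yields
# shared_db=True, srv_name='Shared' and report='Weekly summary', while a
# 'U&...' link selects the per-user database server instead.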
if report in self.glob_redirects[usr]:
redirect_dict = self.glob_redirects[usr][report]['path']
raise cherrypy.HTTPRedirect(redirect_dict)
if srv_name not in self.glob_reports[usr]:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title='Missing server', usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Server `<b>{}</b>` is not defined!<br/><br/>'
'Go <a href="/report/home/{}">Home</a> ...'.format(srv_name, usr))
if report not in self.glob_reports[usr][srv_name]:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title='Missing report', usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Report `<b>{}</b>` is not defined!<br/><br/>'
'Go <a href="/report/home/{}">Home</a> ...'.format(report, usr))
logDebug('Prepare {} report `{}`, for user `{}`...'.format(srv_name, report, usr))
# All info about the report, from DB XML
report_dict = self.glob_reports[usr][srv_name][report]
query = report_dict['sqlquery']
db_server, db_name, db_user, db_passwd, _ = self.db_servers[srv_name]
conn = self.project.dbmgr.connect_db(usr, db_server, db_name, db_user, db_passwd,
shared_db=shared_db)
if not conn:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink, links=self.glob_links[usr],
msg='Cannot connect to MySql server `{} / {}` !'.format(db_server, db_name))
curs = conn.cursor()
# All variables that must be replaced in Query
vars_to_replace = re.findall('(@.+?@)', query)
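# For example (hypothetical query): for
#   SELECT * FROM results WHERE suite = '@suite@' AND build = '@build@'
# vars_to_replace is ['@suite@', '@build@']; each variable is either turned
# into a selection form below (from the fields section) or substituted with
# the value the user submitted in the request parameters.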
# ------------------------------------------------------------------------------------------
# If the user didn't select fields YET :
# ------------------------------------------------------------------------------------------
if vars_to_replace and not cherrypy.request.params:
# Options are defined as: Type, Label, Data
u_options = OrderedDict()
for opt in vars_to_replace:
u_field = self.glob_fields[usr].get(opt.replace('@', ''))
this_option = {}
if not u_field:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Cannot build query!<br><br>Field `<b>{}</b>` '\
'is not defined in the fields section!'.format(opt.replace('@', '')))
this_option['type'] = u_field.get('type')
this_option['label'] = u_field.get('label')
# Field type : User Select
if this_option['type'] == 'UserSelect':
u_query = u_field.get('sqlquery')
if not u_query:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Cannot build query!<br><br>Field `<b>{}</b>` doesn\'t '\
'have a query!'.format(opt.replace('@', '')))
# Execute User Query
try:
curs.execute(u_query)
except MySQLdb.Error as err:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Error in query `{}`!<br><br><b>MySQL Error {}</b>: {}!'.format(
u_query, err.args[0], err.args[1]))
try:
u_vals = curs.fetchall()
except Exception as err:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Error in query `{}`!<br><br><b>Exception</b>: {}!'.format(u_query, err))
# No data available
if not u_vals:
this_option['data'] = []
# Data has one column
elif len(u_vals[0]) == 1:
field_data = [(val[0], val[0]) for val in u_vals]
this_option['data'] = field_data
# Data has 2 or more columns
else:
field_data = [(str(val[0]), str(val[0]) + ': ' + '| '.join(val[1:])) for val in u_vals]
this_option['data'] = field_data
# Field type : User Text
elif this_option['type'] == 'UserText':
this_option['data'] = ''
else:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr],
msg='Field `<b>{}</b>` is of unknown type: <b>{}</b>!'.format(
opt.replace('@', ''), this_option['type']))
u_options[opt] = this_option
output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
return output.render(title=report, usr=usr, rlink=rlink,
links=self.glob_links[usr], options=u_options)
# ------------------------------------------------------------------------------------------
# If the user has selected the fields :
# ------------------------------------------------------------------------------------------
ajax_links = []
# ... For normal Queries ...
for field in vars_to_replace:
# The value chosen by the user
u_select = cherrypy.request.params.get(field)
if not u_select:
u_select = ''
ajax_links.append(field +'='+ u_select)
# Replace @variables@ with user chosen value
query = query.replace(field, str(u_select))
ajax_links = sorted(set(ajax_links))
ajax_link = '/report/json/' + rlink + '/' + usr + '?' + '&'.join(ajax_links)
user_choices = ('", '.join(ajax_links))
user_choices = user_choices.replace('@', '').replace('=', '="') + '"'
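# Small worked example of the assignments above (field names are hypothetical):
#   ajax_links == ['@start@=2014-01-01', '@stop@=2015-01-01']
#   ajax_link  == '/report/json/<rlink>/<usr>?@start@=2014-01-01&@stop@=2015-01-01'
#   user_choices == 'start="2014-01-01", stop="2015-01-01"'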
del ajax_links
try:
curs.execute(query)
except MySQLdb.Error as err:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink, links=self.glob_links[usr],
msg='Error in query `{}`!<br><br>' \
'<b>MySQL Error {}</b>: {}!'.format(query, err.args[0], err.args[1]))
descr = [desc[0] for desc in curs.description]
# ... For Query Compare side by side, the table is double ...
query_compr = report_dict['sqlcompr']
if query_compr:
# All variables that must be replaced in Query
vars_to_replace = re.findall('(@.+?@)', query_compr)
for field in vars_to_replace:
# The value chosen by the user
u_select = cherrypy.request.params.get(field)
# Replace @variables@ with user chosen value
query_compr = query_compr.replace(field, str(u_select))
try:
curs.execute(query_compr)
except MySQLdb.Error as err:
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title=report, usr=usr, rlink=rlink, links=self.glob_links[usr],
msg='Error in query `{}`!<br><br>' \
'<b>MySQL Error {}</b>: {}!'.format(query_compr, err.args[0], err.args[1]))
headers_tot = [desc[0] for desc in curs.description]
# Update headers: must contain both headers.
descr = descr + ['vs.'] + headers_tot
# Write DEBUG
#DEBUG.write(report +' -> '+ user_choices +' -> '+ query_compr + '\n\n') ; DEBUG.flush()
output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
return output.render(usr=usr, title=report, rlink=rlink, links=self.glob_links[usr],
ajax_link=ajax_link, user_choices=user_choices,
report=descr, chart=report_dict['type'])
@cherrypy.expose
def json(self, report, usr, **args):
"""
The report data, in json format.
"""
if not usr:
output = {'aaData':[], 'error':'Error! This link should be '\
'accessed by passing a username, eg: ' \
'/json/some_report/some_user'}
return json.dumps(output, indent=2)
if not os.path.isdir(userHome(usr) + '/twister/config'):
output = {'aaData':[], 'error':'Error! Username `{}` doesn\'t have '\
'a Twister config folder!'.format(usr)}
return json.dumps(output, indent=2)
self.load_config(usr) # Re-load all Database XML
cherrypy.response.headers['Content-Type'] = 'application/json; charset=utf-8'
cherrypy.response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
cherrypy.response.headers['Pragma'] = 'no-cache'
cherrypy.response.headers['Expires'] = 0
# The report name is like "U&..." or "S&..."
shared_db, report = report[0], report[2:]
if shared_db == 'S':
shared_db = True
srv_name = 'Shared'
else:
shared_db = False
srv_name = 'User'
if srv_name not in self.glob_reports[usr]:
output = {'aaData':[], 'error':'Server `{}` is not in the list of defined servers!'.format(srv_name)}
return json.dumps(output, indent=2)
if report not in self.glob_reports[usr][srv_name]:
output = {'aaData':[], 'error':'Report `{}` is not in the list of defined reports!'.format(report)}
return json.dumps(output, indent=2)
# All info about the report, from DB XML.
report_dict = self.glob_reports[usr][srv_name][report]
query = report_dict['sqlquery']
db_server, db_name, db_user, db_passwd, _ = self.db_servers[srv_name]
conn = self.project.dbmgr.connect_db(usr, db_server, db_name, db_user, db_passwd,
shared_db=shared_db)
if not conn:
output = {'aaData':[], 'error':'Cannot connect to MySql server `{} / {}`!'.format(db_server, db_name)}
return json.dumps(output, indent=2)
curs = conn.cursor()
# All variables that must be replaced in Query
vars_to_replace = re.findall('(@.+?@)', query)
for field in vars_to_replace:
# The value chosen by the user
u_select = cherrypy.request.params.get(field)
# Replace @variables@ with user chosen value
query = query.replace(field, str(u_select))
try:
curs.execute(query)
except MySQLdb.Error as err:
output = {'aaData':[], 'error':'Error in query `{}`! ' \
'MySQL Error {}: {}!'.format(query, err.args[0], err.args[1])}
return json.dumps(output, indent=2)
headers = [desc[0] for desc in curs.description]
rows = curs.fetchall()
del query
query_total = report_dict['sqltotal']
query_compr = report_dict['sqlcompr']
# ... Calculate SQL Query Total ...
if query_total:
# All variables that must be replaced in Query
vars_to_replace = re.findall('(@.+?@)', query_total)
for field in vars_to_replace:
# The value chosen by the user
u_select = cherrypy.request.params.get(field)
# Replace @variables@ with user chosen value
query_total = query_total.replace(field, str(u_select))
try:
curs.execute(query_total)
except MySQLdb.Error as err:
output = {'aaData':[], 'error':'Error in query total `{}`! ' \
'MySQL Error {}: {}!'.format(query_total, err.args[0], err.args[1])}
return json.dumps(output, indent=2)
headers_tot = [desc[0] for desc in curs.description]
rows_tot = curs.fetchall()
if len(headers) != len(headers_tot):
output = {'aaData':[], 'error':'The first query has {} columns and the second has {} columns!'
.format(len(headers), len(headers_tot))}
return json.dumps(output, indent=2)
# Calculate the new rows like this:
# the first column of the first query is kept unchanged;
# the second column of the first query / the second column of the second query * 100.
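# Sketch with hypothetical values: rows == {'2014-01': 5}, rows_tot == {'2014-01': 20}
# gives calc_rows == [['2014-01', 25.0]]; keys present only in rows_tot map to 0.0.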
calc_rows = []
rows = {r[0]:r[1] for r in rows}
rows_tot = {r[0]:r[1] for r in rows_tot}
for rnb in rows_tot.keys():
if rnb in rows.keys():
# Calculate percent...
percent = '%.2f' % (float(rows[rnb]) / rows_tot[rnb] * 100.0)
# Using the header from Total, because it might be Null in the first query
calc_rows.append([rnb, float(percent)])
else:
calc_rows.append([rnb, 0.0])
# ... SQL Query Compare side by side ...
elif query_compr:
# All variables that must be replaced in Query
vars_to_replace = re.findall('(@.+?@)', query_compr)
for field in vars_to_replace:
# The value chosen by the user
u_select = cherrypy.request.params.get(field)
# Replace @variables@ with user chosen value
query_compr = query_compr.replace(field, str(u_select))
try:
curs.execute(query_compr)
except MySQLdb.Error as err:
output = {'aaData':[], 'error':'Error in query compare `{}`! '\
'MySQL Error {}: {}!'.format(query_compr, err.args[0], err.args[1])}
return json.dumps(output, indent=2)
headers_tot = [desc[0] for desc in curs.description]
rows_tot = curs.fetchall()
if len(headers) != len(headers_tot): # Must be the same number of columns
output = {'aaData':[], 'error':'The first query has {} columns and the second has {} columns!'
.format(len(headers), len(headers_tot))}
return json.dumps(output, indent=2)
headers_len = len(headers)
rows_max_size = max(len(rows), len(rows_tot))
calc_rows = []
for i in range(rows_max_size):
row1 = rows[i:i+1]
row2 = rows_tot[i:i+1]
if not row1:
row1 = [' ' for i in range(headers_len)]
else:
row1 = row1[0]
if not row2:
row2 = [' ' for i in range(headers_len)]
else:
row2 = row2[0]
calc_rows.append(tuple(row1) +(' <---> ',)+ tuple(row2))
# Update headers: must contain both headers.
headers = headers + ['vs.'] + headers_tot
# ... Normal Query ...
else:
calc_rows = rows
del rows
if (not calc_rows) or (not calc_rows[0:1]):
output = {'aaData':[], 'error':'The select is empty!'}
return json.dumps(output, indent=2)
if isinstance(calc_rows[0][0], datetime.datetime):
is_date = True
else:
is_date = False
dthandler = lambda obj: obj.strftime('%Y-%m-%d %H:%M:%S') if isinstance(obj, datetime.datetime) else None
return json.dumps({'headers':headers, 'type':report_dict['type'], 'isDate':is_date, 'aaData':calc_rows},
indent=2, default=dthandler)
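# A successful response is a JSON object shaped like:
#   {"headers": [...], "type": <report type>, "isDate": true/false, "aaData": [[...], ...]}
# with datetime values serialized as 'YYYY-MM-DD HH:MM:SS' by the handler above.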
@cherrypy.expose
def error(self, **args):
"""
The error page.
"""
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title='Error 404', links=[], msg='Sorry, this page does not exist!')
@cherrypy.expose
def default(self, **args):
"""
The error page.
"""
output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
return output.render(title='Error 404', links=[], msg='Sorry, this page does not exist!')
# Eof()
|
|
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`core` --- Core beacon server
==================================
"""
# Stdlib
import logging
from collections import defaultdict
# SCION
from beacon_server.base import BeaconServer, BEACONS_PROPAGATED
from lib.defines import GEN_CACHE_PATH
from lib.errors import SCIONServiceLookupError
from lib.packet.ctrl_pld import CtrlPayload
from lib.packet.opaque_field import InfoOpaqueField
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.seg_recs import PathRecordsReg
from lib.packet.pcb import PathSegment
from lib.path_store import PathStore
from lib.types import PathSegmentType as PST, ServiceType
from lib.util import SCIONTime
class CoreBeaconServer(BeaconServer):
"""
PathConstructionBeacon Server in a core AS.
Starts broadcasting beacons down-stream within an ISD and across ISDs
towards other core beacon servers.
"""
def __init__(self, server_id, conf_dir, spki_cache_dir=GEN_CACHE_PATH,
prom_export=None, sciond_path=None, filter_isd_loops=False):
"""
:param str server_id: server identifier.
:param str conf_dir: configuration directory.
:param str prom_export: prometheus export address.
:param str sciond_path: path to sciond socket
:param bool filter_isd_loops: filter ISD loops
"""
super().__init__(server_id, conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, sciond_path=sciond_path)
# Sanity check that we should indeed be a core beacon server.
assert self.topology.is_core_as, "This shouldn't be a local BS!"
self.core_beacons = defaultdict(self._ps_factory)
self.filter_isd_loops = filter_isd_loops
def _ps_factory(self):
"""
:returns: a fresh path store configured with the local path policy.
:rtype: PathStore
"""
return PathStore(self.path_policy)
def propagate_core_pcb(self, pcb):
"""
Propagates the core beacons to other core ASes.
"""
propagated_pcbs = defaultdict(list)
prop_cnt = 0
for intf in self.topology.core_interfaces:
dst_ia = intf.isd_as
if not self._filter_pcb(pcb, dst_ia=dst_ia):
continue
new_pcb, meta = self._mk_prop_pcb_meta(
pcb.copy(), intf.isd_as, intf.if_id)
if not new_pcb:
continue
self.send_meta(CtrlPayload(new_pcb.pcb()), meta)
propagated_pcbs[(intf.isd_as, intf.if_id)].append(pcb.short_id())
prop_cnt += 1
if self._labels:
BEACONS_PROPAGATED.labels(**self._labels, type="core").inc(prop_cnt)
return propagated_pcbs
def handle_pcbs_propagation(self):
"""
Generate new beacons and propagate the ones already received.
"""
timestamp = int(SCIONTime.get_time())
# Create beacon for downstream ASes.
down_iof = InfoOpaqueField.from_values(timestamp, self.addr.isd_as[0])
downstream_pcb = PathSegment.from_values(down_iof)
propagated_pcbs = self.propagate_downstream_pcb(downstream_pcb)
# Create beacon for core ASes.
core_iof = InfoOpaqueField.from_values(timestamp, self.addr.isd_as[0])
core_pcb = PathSegment.from_values(core_iof)
propagated = self.propagate_core_pcb(core_pcb)
for k, v in propagated.items():
propagated_pcbs[k].extend(v)
# Propagate received beacons. A core beacon server can only receive
# beacons from other core beacon servers.
beacons = []
with self._rev_seg_lock:
for ps in self.core_beacons.values():
beacons.extend(ps.get_best_segments())
for pcb in beacons:
propagated = self.propagate_core_pcb(pcb)
for k, v in propagated.items():
propagated_pcbs[k].extend(v)
self._log_propagations(propagated_pcbs)
def register_segments(self):
self.register_core_segments()
def register_core_segment(self, pcb, svc_type):
"""
Send a core-segment registration to a local server of the given service type.
:raises:
SCIONServiceLookupError: service type lookup failure
"""
pcb.sign(self.signing_key)
# Register core path with local core path server.
addr, port = self.dns_query_topo(svc_type)[0]
records = PathRecordsReg.from_values({PST.CORE: [pcb]})
meta = self._build_meta(host=addr, port=port, reuse=True)
self.send_meta(CtrlPayload(PathMgmt(records.copy())), meta)
return meta
def _filter_pcb(self, pcb, dst_ia=None):
"""
Check that there are no AS- or ISD-level loops in the PCB.
An AS-level loop is where a beacon passes through any AS more than once.
An ISD-level loop is where a beacon passes through any ISD more than
once.
"""
# Add the current ISD-AS to the end, to look for loops in the final list
# of hops.
isd_ases = [asm.isd_as() for asm in pcb.iter_asms()]
isd_ases.append(self.addr.isd_as)
# If a destination ISD-AS is specified, add that as well. Used to decide
# when to propagate.
if dst_ia:
isd_ases.append(dst_ia)
isds = set()
last_isd = 0
for isd_as in isd_ases:
if isd_ases.count(isd_as) > 1:
# This ISD-AS has been seen before
return False
curr_isd = isd_as[0]
if curr_isd == last_isd:
continue
# Switched to a new ISD
last_isd = curr_isd
if self.filter_isd_loops and curr_isd in isds:
# This ISD has been seen before
return False
isds.add(curr_isd)
return True
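# Worked example (hypothetical ISD-AS values): for beacon hops [1-10, 1-11],
# local AS 2-20 and dst_ia 1-12, the ISD sequence is 1, 1, 2, 1; with
# filter_isd_loops set this PCB is rejected, since ISD 1 reappears after ISD 2.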
def _handle_verified_beacon(self, pcb):
"""
Once a beacon has been verified, place it into the right containers.
:param pcb: verified path segment.
:type pcb: PathSegment
"""
with self._rev_seg_lock:
self.core_beacons[pcb.first_ia()].add_segment(pcb)
def register_core_segments(self):
"""
Register the core segment between core ASes.
"""
core_segments = []
with self._rev_seg_lock:
for ps in self.core_beacons.values():
core_segments.extend(ps.get_best_segments(sending=False))
registered_paths = defaultdict(list)
for pcb in core_segments:
new_pcb = self._terminate_pcb(pcb)
if not new_pcb:
continue
try:
dst_meta = self.register_core_segment(new_pcb, ServiceType.PS)
except SCIONServiceLookupError as e:
logging.warning("Unable to send core-segment registration: %s", e)
continue
# Keep the ID of the not-terminated PCB to relate to previously received ones.
registered_paths[(str(dst_meta), ServiceType.PS)].append(pcb.short_id())
self._log_registrations(registered_paths, "core")
def _remove_revoked_pcbs(self, rev_info):
candidates = []
with self._rev_seg_lock:
for ps in self.core_beacons.values():
candidates += ps.candidates
to_remove = self._pcb_list_to_remove(candidates, rev_info)
# Remove the affected segments from the path stores.
for ps in self.core_beacons.values():
ps.remove_segments(to_remove)
|
|
#
# License: MIT
# https://raw.githubusercontent.com/stonier/feed_the_troll/devel/LICENSE
#
##############################################################################
# Description
##############################################################################
"""
.. module:: servers
:platform: Unix
:synopsis: Ready to rumble servers
Various servers tailor made in the feeder-troll style to suit various purposes.
"""
##############################################################################
# Imports
##############################################################################
import dynamic_reconfigure.server
import feed_the_troll
import functools
import importlib
import rosgraph
import rospy
import termcolor
import threading
##############################################################################
# helpers
##############################################################################
def validate_server_parameters(server_parameters):
"""
Take in a reconfiguration setup from the parameter server (a dict of dynamic
reconfigure server names with corresponding namespace, module & overrides
values) and check that it is properly formatted and that each module can be imported.
"""
error_messages = []
for v in server_parameters.values():
if 'module' not in v:
error_messages.append("no dynamic reconfigure 'module' specified in the reconfigure server settings (e.g. 'feed_the_troll.cfg.DemoConfig']")
continue
try:
importlib.import_module(v['module'])
except ImportError:
error_messages.append("could not import dynamic reconfigure module [{0}]".format(v['module']))
continue
return error_messages
def namespace_from_configuration(server_name, server_configuration):
"""
:param str server_name: name of the reconfigure server (used as the default namespace)
:param dict server_configuration: troll reconfiguration server parameters (namespace, module, overrides)
:returns: the fully resolved namespace for this server's parameters
"""
namespace = server_configuration['namespace'] if 'namespace' in server_configuration else server_name
if not namespace.startswith('/') and not namespace.startswith('~'):
namespace = rosgraph.names.ns_join('~', namespace)
return rospy.resolve_name(namespace)
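# For example, in a node named /reconfiguration (name is illustrative), a server
# 'foo' without a 'namespace' key resolves to '/reconfiguration/foo', while an
# absolute namespace such as '/navigation/foo' is returned unchanged.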
##############################################################################
# ReConfiguration
##############################################################################
class ReconfigureServerInfo(object):
"""
A simple class holding current information about a running dynamic
reconfigure server.
"""
def __init__(self):
self.is_default_instance = False # is the current instance a default server?
self.default_configuration = None # if there is a default server, what is its configuration?
self.namespace = None # where the parameters are stored (the dyn reconf server doesn't actually save this, so we do)
self.server = None # the currently running instance
class ReConfiguration(object):
"""
**About**
*Q) What to do when you need to share dynamic_reconfigure variables
amongst a collection of nodes?*
A good example is sharing navigation motion constraints amongst
a collection of nodes that control the motion of the base (general
navigation, parking, docking, ...)
This standalone node volunteers for the responsibility of loading and
managing dynamic reconfigure servers from a central location. All you
need do is feed it with a yaml/rosparam configuration that will define
the dynamic_reconfigure servers you wish to fire up along with any
initial parameter overrides to use when instantiating them.
It also manages these for free. That is, it is able to bring the
dynamic reconfigure servers up and down in sync with the starting
up and tearing down of your higher level applications.
*Reconfigure your Reconfiguration!*
**Usage - Reconfiguration Server**
You will need to prepare the following (no coding necessary!):
* A yaml defining the dyn-recfg servers you need
* A launcher for starting the reconfiguration server
* A launcher for starting the feeder with your yaml configuration
If you're familiar with nodelet managers and nodelets, this process is similar.
When the feeder node launches it sends a request to the server to fire up
the specified list of dynamic reconfigure servers. When it terminates,
it will shoot one last service call off to the reconfiguration server to
shutdown the previously started dynamic reconfigure servers.
**Example - Reconfiguration Server**
An example set of files (also available as a demo within this package):
.. literalinclude:: ../launch/demo_reconfiguration_server.launch
:language: xml
feed it using this package's parameter feeder:
.. literalinclude:: ../launch/demo_reconfiguration_feeder.launch
:language: xml
.. literalinclude:: ../parameters/demo_reconfiguration.yaml
:language: yaml
**Usage - Reconfiguration Clients**
Client programs that need to tune into the dynamic reconfigure servers simply
need to instantiate a dynamic reconfigure client, or more simply, a subscriber
listening to the dynamic reconfigure server's private ``parameter_updates`` topic.
**Examples - Reconfiguration Clients**
Python:
.. literalinclude:: ../scripts/demo_reconfiguration_client.py
:language: python
:lines: 10-27
:ivar debug: print verbose debugging information on loading/unloading/updating
"""
def __init__(self):
"""
"""
self.guard = threading.Lock()
self.reconfigure_servers = {}
self.debug = rospy.get_param("~debug", False)
self._start_default_servers()
# must come after everything else is set up
self.troll = feed_the_troll.trolls.ROSParameters(
loading_handler=self._load,
unloading_handler=self._unload
)
def _start_server(self, server_name, server_configuration):
"""
:param str server_name: unique name for the dynamic reconfigure server
:param dict server_configuration: troll reconfiguration server parameters (namespace, module, overrides)
"""
reconfigure_module = importlib.import_module(server_configuration['module'])
reconfigure_server_namespace = namespace_from_configuration(server_name, server_configuration)
#
# Reset: Start with a clean slate - i.e. ensure that any settings from a
# previous feeder (or other rubbish) is cleared out. Easiest done by
# writing in the dyn reconf server defaults.
#
# Note: Simply deleting/overwriting the entire namespace (brute force) works
# but only if it is assumed that the user has reserved the namespace for this
# dyn reconf server. This is not always true and if there are other static
# params there, they will be wiped out too.
defaults = reconfigure_module.defaults.copy()
for name, value in defaults.iteritems():
full_parameter_path = reconfigure_server_namespace + "/" + name
if 'overrides' in server_configuration and name in server_configuration['overrides'].keys():
rospy.set_param(full_parameter_path, server_configuration['overrides'][name])
else:
rospy.set_param(full_parameter_path, value)
return dynamic_reconfigure.server.Server(
reconfigure_module,
functools.partial(self.callback, name=server_name),
namespace=reconfigure_server_namespace
)
def _start_default_servers(self):
try:
default_server_parameters = rospy.get_param("~servers")
except KeyError:
return # nothing to do
error_messages = validate_server_parameters(default_server_parameters)
if error_messages:
rospy.logerr("Reconfiguration: errors in the default server configurations")
for message in error_messages:
rospy.logerr(" : {0}".format(message))
return
for server_name, server_configuration in default_server_parameters.iteritems():
self.reconfigure_servers[server_name] = ReconfigureServerInfo()
self.reconfigure_servers[server_name].is_default_instance = True
self.reconfigure_servers[server_name].default_configuration = server_configuration
self.reconfigure_servers[server_name].namespace = namespace_from_configuration(server_name, server_configuration)
self.reconfigure_servers[server_name].server = self._start_server(server_name, server_configuration)
def _load(self, unique_identifier, namespace):
"""
:param uuid.UUID unique_identifier:
:param str namespace: root namespace for configuration on the parameter server
"""
try:
incoming_configuration = rospy.get_param(namespace)
except KeyError:
error_message = "could not retrieve parameters for configuration [{0}]".format(namespace)
rospy.logerr("Reconfiguration: {0}".format(error_message))
return (False, error_message)
self._pretty_print_incoming("Reconfigure Loading", unique_identifier, namespace, incoming_configuration)
# checks
error_messages = validate_server_parameters(incoming_configuration)
with self.guard:
for k in incoming_configuration.keys():
if k in self.reconfigure_servers and not self.reconfigure_servers[k].is_default_instance:
error_messages.append("this reconfigure server is already being served [{0}]".format(k))
continue
if error_messages:
rospy.logerr("Reconfiguration: errors loading the passed parameterisations")
for message in error_messages:
rospy.logerr(" : {0}".format(message))
return (False, ', '.join(error_messages))
# setup
with self.guard:
for server_name, server_configuration in incoming_configuration.iteritems():
if server_name not in self.reconfigure_servers:
self.reconfigure_servers[server_name] = ReconfigureServerInfo()
self.reconfigure_servers[server_name].is_default_instance = False
self.reconfigure_servers[server_name].default_configuration = None
self.reconfigure_servers[server_name].namespace = namespace_from_configuration(server_name, server_configuration)
self.reconfigure_servers[server_name].server = self._start_server(server_name, server_configuration)
else:
# at this point, we know it is a default instance (servers that are running and non-default were rejected above)
# 1. save the latest default configuration
current_dynamic_reconfigure_parameters = rospy.get_param(self.reconfigure_servers[server_name].namespace)
self.reconfigure_servers[server_name].default_configuration['overrides'] = current_dynamic_reconfigure_parameters
# 2. set the new parameters
# magically merge current and incoming (this only works when keys are strings - http://treyhunner.com/2016/02/how-to-merge-dictionaries-in-python/)
new_parameters = dict(current_dynamic_reconfigure_parameters, **incoming_configuration[server_name]['overrides'])
self.reconfigure_servers[server_name].server.update_configuration(new_parameters)
# 3. set is_default_instance to False
self.reconfigure_servers[server_name].is_default_instance = False
return (True, "Success")
def _unload(self, unique_identifier, namespace):
"""
:param uuid.UUID unique_identifier:
"""
error_messages = []
parameters = rospy.get_param(namespace)
self._pretty_print_incoming("Reconfigure Unloading", unique_identifier, namespace, parameters)
with self.guard:
for server_name, unused_v in parameters.iteritems():
reflected_parameters = rosgraph.names.ns_join("~", server_name)
if rospy.has_param(reflected_parameters):
rospy.delete_param(reflected_parameters)
if server_name not in self.reconfigure_servers:
error_messages.append("could not find server to unload [{0}]".format(server_name))
continue
if self.reconfigure_servers[server_name].is_default_instance:
error_messages.append("refusing to unload a default instance [{0}]".format(server_name))
continue
server_info = self.reconfigure_servers[server_name]
if server_info.default_configuration is None: # it's a server created on the fly
server_info.server.set_service.shutdown()
server_info.server.descr_topic.unregister()
server_info.server.update_topic.unregister()
del server_info.server.set_service
del server_info.server
self.reconfigure_servers.pop(server_name)
else: # there is a default instance and configuration behind it
server_info.server.update_configuration(server_info.default_configuration['overrides'])
server_info.is_default_instance = True
if error_messages:
rospy.logerr("Reconfiguration: errors while unloading")
for message in error_messages:
rospy.logerr(" : {0}".format(message))
return (False, ', '.join(error_messages))
else:
return (True, "Success")
def callback(self, config, level, name):
"""
The name is an additional argument not usually in a dynamic reconfigure server callback, but is fixed by
a functools partial so that we can provide extra useful debugging information by stating *which* dynamic
reconfigure server it relates to.
:param dynamic_reconfigure.encoding.Config config: dynamic reconfigure configuration object, holds all the variables
:param int level:
:param str name: name of the reconfiguration server for which these configuration variables apply
"""
if self.debug:
print("")
termcolor.cprint("Reconfiguration Updating", 'white', attrs=['bold'])
print("")
termcolor.cprint(" Reconfigure Server", "green")
print(" " + termcolor.colored("{0: <23}".format("Name"), 'cyan') + ": " + termcolor.colored("{0}".format(name), 'yellow'))
print(" " + termcolor.colored("{0: <23}".format("Namespace"), 'cyan') + ": " + termcolor.colored("{0}".format(self.reconfigure_servers[name].namespace), 'yellow'))
termcolor.cprint(" Overrides", "cyan")
for k, v in config.iteritems():
if k != "groups":
print(" " + termcolor.colored("{0: <21}".format(k), 'cyan') + ": " + termcolor.colored("{0}".format(v), 'yellow'))
return config
def _pretty_print_incoming(self, title, unique_identifier, namespace, parameters):
if self.debug:
print("")
termcolor.cprint(title, 'white', attrs=['bold'])
print("")
print(" " + termcolor.colored("{0: <25}".format('Feeder'), 'green'))
print(" " + termcolor.colored("{0: <23}".format('Namespace'), 'cyan') + ": " + termcolor.colored("{0}".format(namespace), 'yellow'))
print(" " + termcolor.colored("{0: <23}".format('Unique Identifier'), 'cyan') + ": " + termcolor.colored("{0}".format(unique_identifier), 'yellow'))
for k, v in parameters.iteritems():
print(" " + termcolor.colored("{0}".format("Reconfigure Server"), 'green'))
print(" " + termcolor.colored("{0: <23}".format("Name"), 'cyan') + ": " + termcolor.colored("{0}".format(k), 'yellow'))
print(" " + termcolor.colored("{0: <23}".format("Namespace"), 'cyan') + ": " + termcolor.colored("{0}".format(namespace_from_configuration(k, v)), 'yellow'))
if 'module' in v:
print(" " + termcolor.colored("{0: <23}".format("Type"), 'cyan') + ": " + termcolor.colored("{0}".format(v['module']), 'yellow'))
else:
print(" " + termcolor.colored("{0: <23}".format("Type"), 'cyan') + ": " + termcolor.colored("missing", 'red'))
if 'overrides' in v:
print(" " + termcolor.colored("{0: <23}".format("Overrides"), 'cyan'))
for k2, v2 in v['overrides'].iteritems():
print(" " + termcolor.colored("{0: <21}".format(k2), 'cyan') + ": " + termcolor.colored("{0}".format(v2), 'yellow'))
def spin(self):
rospy.spin()
|
|
# -*- coding: utf-8 -*-
"""PyXB stands for Python U{W3C XML
Schema<http://www.w3.org/XML/Schema>} Bindings, and is pronounced
"pixbee". It enables translation between XML instance documents and
Python objects following rules specified by an XML Schema document.
This is the top-level entrypoint to the PyXB system. Importing this
gets you all the L{exceptions<pyxb.exceptions_.PyXBException>}, and
L{pyxb.namespace}. For more functionality, delve into these
submodules:
- L{pyxb.xmlschema} Module holding the
L{structures<pyxb.xmlschema.structures>} that convert XMLSchema
from a DOM model to a Python class model based on the XMLSchema
components. Use this when you need to operate on the component
model.
- L{pyxb.binding} Module used to generate the bindings and at runtime
to support the generated bindings. Use this if you need to use the
binding model or content model.
- L{pyxb.utils} Common utilities used in parsing, generating, and
executing. The submodules must be imported separately.
"""
import logging
_log = logging.getLogger(__name__)
class cscRoot (object):
"""This little bundle of joy exists because in Python 2.6 it
became an error to invoke C{object.__init__} with parameters (unless
you also override C{__new__}, in which case it's only a warning.
Whatever.). Since I'm bloody not going to check in every class
whether C{super(Myclass,self)} refers to C{object} (even if I could
figure out how to do that, 'cuz the obvious solutions don't work),
we'll just make this thing the root of all U{cooperative super
calling<http://www.geocities.com/foetsch/python/new_style_classes.htm#super>}
hierarchies. The standard syntax in PyXB for this pattern is::
def method_csc (self, *args, **kw):
self_fn = lambda *_args, **_kw: self
super_fn = getattr(super(ThisClass, self), 'method_csc', self_fn)
return super_fn(*args, **kw)
"""
def __init__ (self, *args, **kw):
# Oh gross. If this class descends from list (and probably dict), we
# get here when object is *not* our direct superclass. In that case,
# we have to pass the arguments on up, or the strings don't get
# created right. Below is the only way I've figured out to detect the
# situation.
#
# Note that we might also get here if you mix-in a class that used
# object as a parent instead of cscRoot. Don't do that. Printing the
# mro() is a decent way of identifying the problem.
if issubclass(self.__class__.mro()[-2], ( list, dict )):
super(cscRoot, self).__init__(*args)
__version__ = '1.2.3'
"""The version of PyXB"""
__url__ = 'http://pyxb.sourceforge.net'
"""The URL for PyXB's homepage"""
__license__ = 'Apache License 2.0'
# Bring in the exception hierarchy
from .exceptions_ import *
# Bring in namespace stuff
from . import namespace
class BIND (object):
"""Bundle data for automated binding generation.
Instances of this class capture positional and keyword arguments that are
used to create binding instances based on context. For example, if C{w}
is an instance of a complex type whose C{option} element is declared to be
an anonymous class with simple content of type integer and an attribute of
C{units}, a correct assignment to that element could be achieved with::
w.option = BIND(54, units="m")
"""
__args = None
__kw = None
def __init__ (self, *args, **kw):
"""Cache parameters for subsequent binding creation.
Invoke just as you would the factory for a binding class."""
self.__args = args
self.__kw = kw
def createInstance (self, factory, **kw):
"""Invoke the given factory method.
Position arguments to the factory are those cached in this instance.
Keyword arguments are the ones on the command line, updated from the
ones in this instance."""
kw.update(self.__kw)
return factory(*self.__args, **kw)
XMLStyle_minidom = 0
"""Use xml.dom.minidom for XML processing. This is the fastest, but does not
provide location information. It produces DOM instances."""
XMLStyle_saxdom = 1
"""Use pyxb.utils.saxdom for XML processing. This is the slowest, but both
provides location information and generates a DOM instance."""
XMLStyle_saxer = 2
"""Use pyxb.binding.saxer when converting documents to binding instances.
This style supports location information in the bindings. It produces binding
instances directly, without going through a DOM stage, so is faster than
XMLStyle_saxdom. However, since the pyxb.xmlschema.structures classes require
a DOM model, XMLStyle_saxdom will be used for pyxb.utils.domutils.StringToDOM
if this style is selected."""
_XMLStyle = XMLStyle_saxer
"""The current XML processing style."""
_XMLStyleMap = { 'minidom' : XMLStyle_minidom,
'saxdom' : XMLStyle_saxdom,
'saxer' : XMLStyle_saxer }
_XMLStyleMapReverse = dict([ (_v, _k) for (_k, _v) in _XMLStyleMap.iteritems() ])
_XMLStyle_envvar = 'PYXB_XML_STYLE'
def _SetXMLStyle (style=None):
"""Set the interface used to parse XML content.
This can be invoked within code. The system default of L{XMLStyle_saxer}
can also be overridden at runtime by setting the environment variable
C{PYXB_XML_STYLE} to one of C{minidom}, C{saxdom}, or C{saxer}.
@param style: One of L{XMLStyle_minidom}, L{XMLStyle_saxdom},
L{XMLStyle_saxer}. If not provided, the system default is used.
"""
global _XMLStyle
if style is None:
import os
style_name = os.environ.get(_XMLStyle_envvar)
if style_name is None:
style_name = 'saxer'
style = _XMLStyleMap.get(style_name)
if style is None:
raise PyXBException('Bad value "%s" for %s' % (style_name, _XMLStyle_envvar))
if _XMLStyleMapReverse.get(style) is None:
raise PyXBException('Bad value %s for _SetXMLStyle' % (style,))
_XMLStyle = style
_SetXMLStyle()
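# Sketch: callers can also force the style explicitly, e.g.
#   import pyxb
#   pyxb._SetXMLStyle(pyxb.XMLStyle_saxdom)  # DOM instances plus location information
# or by exporting PYXB_XML_STYLE=saxdom before the interpreter starts.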
# Global flag that we can use to determine whether optimization is active in
# this session. There may be cases where we can bypass methods that just
# check for things we don't care about in an optimized context
_OptimizationActive = False
try:
assert False
_OptimizationActive = True
except:
pass
_CorruptionDetectionEnabled = not _OptimizationActive
"""If C{True}, blocks attempts to assign to attributes that are reserved for
PyXB methods.
Applies only at compilation time; dynamic changes are ignored.
"""
class ValidationConfig (object):
"""Class holding configuration related to validation.
L{pyxb.GlobalValidationConfig} is available to influence validation in all
contexts. Each binding class has a reference to an instance of this
class, which can be inspected using
L{pyxb.binding.basis._TypeBinding_mixin._GetValidationConfig} and changed
using L{pyxb.binding.basis._TypeBinding_mixin._SetValidationConfig}. Each
binding instance has a reference inherited from its class which can be
inspected using L{pyxb.binding.basis._TypeBinding_mixin._validationConfig}
and changed using
L{pyxb.binding.basis._TypeBinding_mixin._setValidationConfig}.
This allows fine control on a per class and per-instance basis.
L{forBinding} replaces L{RequireValidWhenParsing}.
L{forDocument} replaces L{RequireValidWhenGenerating}.
L{contentInfluencesGeneration}, L{orphanElementInContent}, and
L{invalidElementInContent} control how
L{pyxb.binding.basis.complexTypeDefinition.orderedContent} affects
generated documents.
"""
__forBinding = True
def _getForBinding (self):
"""C{True} iff validation should be performed when manipulating a
binding instance.
This includes parsing a document or DOM tree, using a binding instance
class constructor, or assigning to an element or attribute field of a
binding instance."""
return self.__forBinding
def _setForBinding (self, value):
"""Configure whether validation should be performed when manipulating
a binding instance."""
if not isinstance(value, bool):
raise TypeError(value)
self.__forBinding = value
return value
forBinding = property(_getForBinding)
__forDocument = True
def _getForDocument (self):
"""C{True} iff validation should be performed when creating a document
from a binding instance.
This applies at invocation of
L{toDOM()<pyxb.binding.basis._TypeBinding_mixin.toDOM>}.
L{toxml()<pyxb.binding.basis._TypeBinding_mixin.toxml>} invokes C{toDOM()}."""
return self.__forDocument
def _setForDocument (self, value):
"""Configure whether validation should be performed when generating
a document from a binding instance."""
if not isinstance(value, bool):
raise TypeError(value)
self.__forDocument = value
return value
forDocument = property(_getForDocument)
ALWAYS = -1
"""Always do it."""
NEVER = 0
"""Never do it."""
IGNORE_ONCE = 1
"""If an error occurs ignore it and continue with the next one. (E.g., if
an element in a content list fails skip it and continue with the next
element in the list.)"""
GIVE_UP = 2
"""If an error occurs ignore it and stop using whatever provided the cause
of the error. (E.g., if an element in a content list fails stop
processing the content list and execute as though it was absent)."""
RAISE_EXCEPTION = 3
"""If an error occurs, raise an exception."""
MIXED_ONLY = 4
"""Only when content type is mixed."""
__contentInfluencesGeneration = MIXED_ONLY
def __getContentInfluencesGeneration (self):
"""Determine whether complex type content influences element order in
document generation.
The value is one of L{ALWAYS}, L{NEVER}, L{MIXED_ONLY} (default)."""
return self.__contentInfluencesGeneration
def _setContentInfluencesGeneration (self, value):
"""Set the value of L{contentInfluencesGeneration}."""
if not (value in ( self.ALWAYS, self.NEVER, self.MIXED_ONLY )):
raise ValueError(value)
self.__contentInfluencesGeneration = value
contentInfluencesGeneration = property(__getContentInfluencesGeneration)
__orphanElementInContent = IGNORE_ONCE
def __getOrphanElementInContent (self):
"""How to handle unrecognized elements in content lists.
This is used when consulting a complex type instance content list to
influence the generation of documents from a binding instance.
The value is one of L{IGNORE_ONCE} (default), L{GIVE_UP},
L{RAISE_EXCEPTION}."""
return self.__orphanElementInContent
def _setOrphanElementInContent (self, value):
"""Set the value of L{orphanElementInContent}."""
if not (value in ( self.IGNORE_ONCE, self.GIVE_UP, self.RAISE_EXCEPTION )):
raise ValueError(value)
self.__orphanElementInContent = value
orphanElementInContent = property(__getOrphanElementInContent)
__invalidElementInContent = IGNORE_ONCE
def __getInvalidElementInContent (self):
"""How to handle invalid elements in content lists.
The value is one of L{IGNORE_ONCE} (default), L{GIVE_UP},
L{RAISE_EXCEPTION}."""
return self.__invalidElementInContent
def _setInvalidElementInContent (self, value):
"""Set the value of L{invalidElementInContent}."""
if not (value in ( self.IGNORE_ONCE, self.GIVE_UP, self.RAISE_EXCEPTION )):
raise ValueError(value)
self.__invalidElementInContent = value
invalidElementInContent = property(__getInvalidElementInContent)
def copy (self):
"""Make a copy of this instance.
Use this to get a starting point when you need to customize validation
on a per-instance/per-class basis."""
import copy
return copy.copy(self)
GlobalValidationConfig = ValidationConfig()
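# Sketch of per-instance customisation using the hooks named in the class
# docstring (the binding instance 'doc' is hypothetical):
#   cfg = GlobalValidationConfig.copy()
#   cfg._setForBinding(False)        # skip validation while manipulating 'doc'
#   doc._setValidationConfig(cfg)    # provided by pyxb.binding.basis._TypeBinding_mixin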
_GenerationRequiresValid = True
"""Legacy flag; prefer L{forDocument<ValidationConfig.forDocument>} in L{GlobalValidationConfig}."""
def RequireValidWhenGenerating (value=None):
"""Query or set a flag that controls validation checking in XML generation.
Normally any attempts to convert a binding instance to a DOM or XML
representation requires that the binding validate against the content
model, since only in this way can the content be generated in the correct
order. In some cases it may be necessary or useful to generate a document
from a binding that is incomplete. If validation is not required, the
generated documents may not validate even if the content validates,
because ordering constraints will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that instances
validate before being converted to XML.
@type value: C{bool}
@return: C{True} iff attempts to generate XML for a binding that does not
validate should raise an exception. """
if value is None:
return GlobalValidationConfig.forDocument
global _GenerationRequiresValid
_GenerationRequiresValid = GlobalValidationConfig._setForDocument(value)
return value
_ParsingRequiresValid = True
"""Legacy flag; prefer L{forBinding<ValidationConfig.forBinding>} in L{GlobalValidationConfig}."""
def RequireValidWhenParsing (value=None):
"""Query or set a flag that controls validation checking in XML parsing.
Normally any attempt to convert XML to a binding instance requires that the
document validate against the content model. In some cases it may be
necessary or useful to process a document that is incomplete. If validation
is not required, the resulting binding may not be consistent with the
schema, because content-model constraints will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that documents
validate when being converted to bindings.
@type value: C{bool}
@return: C{True} iff attempts to generate bindings for a document that
does not validate should raise an exception."""
if value is None:
return GlobalValidationConfig.forBinding
global _ParsingRequiresValid
_ParsingRequiresValid = GlobalValidationConfig._setForBinding(value)
return _ParsingRequiresValid
_PreserveInputTimeZone = False
def PreserveInputTimeZone (value=None):
"""Control whether time values are converted to UTC during input.
The U{specification <http://www.w3.org/TR/xmlschema-2/#dateTime>} makes
clear that timezoned times are in UTC and that times in other timezones
are to be translated to UTC when converted from literal to value form.
Provide an option to bypass this step, so the input timezone is preserved.
@note: Naive processing of unnormalized times--i.e., ignoring the
C{tzinfo} field--may result in errors."""
global _PreserveInputTimeZone
if value is None:
return _PreserveInputTimeZone
if not isinstance(value, bool):
raise TypeError(value)
_PreserveInputTimeZone = value
return _PreserveInputTimeZone
_OutputEncoding = 'utf-8'
"""Default unicode encoding to use when creating output.
Material being written to an XML parser is not output."""
_InputEncoding = 'utf-8'
"""Default unicode encoding to assume when decoding input.
Material being written to an XML parser is treated as input."""
def NonElementContent (instance):
"""Return an iterator producing the non-element content of the provided instance.
The catenated text of the non-element content of an instance can
be obtained with::
text = u''.join(pyxb.NonElementContent(instance))
@param instance: An instance of L{pyxb.binding.basis.complexTypeDefinition}.
@return: an iterator producing text values
"""
import pyxb.binding.basis
return pyxb.binding.basis.NonElementContent.ContentIterator(instance.orderedContent())
## Local Variables:
## fill-column:78
## End:
|
|
from __future__ import annotations
import asyncio
from contextlib import contextmanager, ExitStack
import logging
from typing import (Any, Dict, List, Optional,
Tuple, TYPE_CHECKING, Union, Sequence)
try:
import aionotify # type: ignore
except OSError:
aionotify = None # type: ignore
from opentrons.drivers.smoothie_drivers import driver_3_0
from opentrons.drivers.rpi_drivers import build_gpio_chardev
import opentrons.config
from opentrons.config import pipette_config
from opentrons.types import Mount
from . import modules
from .execution_manager import ExecutionManager
from .types import BoardRevision, Axis
if TYPE_CHECKING:
from opentrons_shared_data.pipette.dev_types import (
PipetteModel, PipetteName
)
from .dev_types import (
RegisterModules, AttachedInstrument, AttachedInstruments,
InstrumentHardwareConfigs)
from opentrons.drivers.rpi_drivers.dev_types\
import GPIODriverLike # noqa(F501)
MODULE_LOG = logging.getLogger(__name__)
class Controller:
""" The concrete instance of the controller for actually controlling
hardware.
"""
def __init__(self, config):
""" Build a Controller instance.
If another controller is already instantiated on the system (or if
this is instantiated somewhere other than a robot) then this method
will raise a RuntimeError.
"""
if not opentrons.config.IS_ROBOT:
MODULE_LOG.warning(
'This is intended to run on a robot, and while it can connect '
'to a smoothie via a usb/serial adapter unexpected things '
'using gpios (such as smoothie reset or light management) '
'will fail. If you are seeing this message and you are '
'running on a robot, you need to set the RUNNING_ON_PI '
'environmental variable to 1.')
self.config = config or opentrons.config.robot_configs.load()
self._gpio_chardev = build_gpio_chardev('gpiochip0')
self._board_revision = BoardRevision.UNKNOWN
# We handle our own locks in the hardware controller thank you
self._smoothie_driver = driver_3_0.SmoothieDriver_3_0_0(
config=self.config, gpio_chardev=self._gpio_chardev,
handle_locks=False)
self._cached_fw_version: Optional[str] = None
try:
self._module_watcher = aionotify.Watcher()
self._module_watcher.watch(
alias='modules',
path='/dev',
flags=(aionotify.Flags.CREATE | aionotify.Flags.DELETE))
except AttributeError:
MODULE_LOG.warning(
'Failed to initiate aionotify, cannot watch modules '
'or door, likely because not running on linux')
@property
def gpio_chardev(self) -> GPIODriverLike:
return self._gpio_chardev
@property
def board_revision(self) -> BoardRevision:
return self._board_revision
async def setup_gpio_chardev(self):
self.gpio_chardev.config_by_board_rev()
self._board_revision = self.gpio_chardev.board_rev
await self.gpio_chardev.setup()
def start_gpio_door_watcher(self, **kargs):
self.gpio_chardev.start_door_switch_watcher(**kargs)
def update_position(self) -> Dict[str, float]:
self._smoothie_driver.update_position()
return self._smoothie_driver.position
def move(self, target_position: Dict[str, float],
home_flagged_axes: bool = True, speed: float = None,
axis_max_speeds: Dict[str, float] = None):
with ExitStack() as cmstack:
if axis_max_speeds:
cmstack.enter_context(
self._smoothie_driver.restore_axis_max_speed(
axis_max_speeds))
self._smoothie_driver.move(
target_position, home_flagged_axes=home_flagged_axes,
speed=speed)
def home(self, axes: List[str] = None) -> Dict[str, float]:
if axes:
args: Tuple[Any, ...] = (''.join(axes),)
else:
args = tuple()
return self._smoothie_driver.home(*args)
def fast_home(
self, axes: Sequence[str],
margin: float) -> Dict[str, float]:
converted_axes = ''.join(axes)
return self._smoothie_driver.fast_home(converted_axes, margin)
def _query_mount(
self,
mount: Mount,
expected: Union[PipetteModel, PipetteName, None]
) -> AttachedInstrument:
found_model: Optional[PipetteModel]\
= self._smoothie_driver.read_pipette_model( # type: ignore
mount.name.lower())
if found_model and found_model not in pipette_config.config_models:
# TODO: Consider how to handle this error - it bubbles up now
# and will cause problems at higher levels
MODULE_LOG.error(
f'Bad model on {mount.name}: {found_model}')
found_model = None
found_id = self._smoothie_driver.read_pipette_id(
mount.name.lower())
if found_model:
config = pipette_config.load(found_model, found_id)
if expected:
acceptable = [config.name] + config.back_compat_names
if expected not in acceptable:
raise RuntimeError(f'mount {mount}: instrument'
f' {expected} was requested'
f' but {config.model} is present')
return {'config': config,
'id': found_id}
else:
if expected:
raise RuntimeError(
f'mount {mount}: instrument {expected} was'
f' requested, but no instrument is present')
return {'config': None, 'id': None}
def get_attached_instruments(
self, expected: Dict[Mount, PipetteName])\
-> AttachedInstruments:
""" Find the instruments attached to our mounts.
:param expected: is ignored, it is just meant to enforce
the same interface as the simulator, where
required instruments can be manipulated.
:returns: A dict with mounts as the top-level keys. Each mount value is
a dict with keys 'config' (containing the attached pipette's configuration,
or `None`) and 'id' (containing the serial number of the pipette
attached to that mount, or `None`). Both mounts will always be
specified.
"""
return {mount: self._query_mount(mount, expected.get(mount))
for mount in Mount}
def set_active_current(self, axis_currents: Dict[Axis, float]):
"""
This method sets only the 'active' current, i.e., the current for an
axis' movement. Smoothie driver automatically resets the current for
pipette axis to a low current (dwelling current) after each move
"""
self._smoothie_driver.set_active_current(
{axis.name: amp for axis, amp in axis_currents.items()})
@contextmanager
def save_current(self):
self._smoothie_driver.push_active_current()
try:
yield
finally:
self._smoothie_driver.pop_active_current()
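# Sketch usage by a hypothetical caller: temporarily raise an axis current and
# let the context manager restore the previous values afterwards:
#   with controller.save_current():
#       controller.set_active_current({Axis.B: 1.0})
#       ...  # perform the move that needs the higher current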
async def _handle_watch_event(self, register_modules: RegisterModules):
try:
event = await self._module_watcher.get_event()
except asyncio.IncompleteReadError:
MODULE_LOG.debug("incomplete read error when quitting watcher")
return
flags = aionotify.Flags.parse(event.flags)
if event is not None and 'ot_module' in event.name:
maybe_module_at_port = modules.get_module_at_port(event.name)
new_modules = None
removed_modules = None
if maybe_module_at_port is not None:
if aionotify.Flags.DELETE in flags:
removed_modules = [maybe_module_at_port]
MODULE_LOG.info(
f'Module Removed: {maybe_module_at_port}')
elif aionotify.Flags.CREATE in flags:
new_modules = [maybe_module_at_port]
MODULE_LOG.info(
f'Module Added: {maybe_module_at_port}')
try:
await register_modules(
removed_mods_at_ports=removed_modules,
new_mods_at_ports=new_modules,
)
except Exception:
MODULE_LOG.exception(
'Exception in Module registration')
async def watch_modules(self, loop: asyncio.AbstractEventLoop,
register_modules: RegisterModules):
can_watch = aionotify is not None
if can_watch:
await self._module_watcher.setup(loop)
initial_modules = modules.discover()
try:
await register_modules(new_mods_at_ports=initial_modules)
except Exception:
MODULE_LOG.exception('Exception in Module registration')
while can_watch and (not self._module_watcher.closed):
await self._handle_watch_event(register_modules)
async def build_module(
self,
port: str,
model: str,
interrupt_callback: modules.InterruptCallback,
loop: asyncio.AbstractEventLoop,
execution_manager: ExecutionManager) -> modules.AbstractModule:
return await modules.build(
port=port,
which=model,
simulating=False,
interrupt_callback=interrupt_callback,
loop=loop,
execution_manager=execution_manager)
async def connect(self, port: str = None):
self._smoothie_driver.connect(port)
await self.update_fw_version()
@property
def axis_bounds(self) -> Dict[Axis, Tuple[float, float]]:
""" The (minimum, maximum) bounds for each axis. """
return {Axis[ax]: (0, pos) for ax, pos
in self._smoothie_driver.homed_position.items()
if ax not in 'BC'}
@property
def fw_version(self) -> Optional[str]:
return self._cached_fw_version
async def update_fw_version(self):
self._cached_fw_version = self._smoothie_driver.get_fw_version()
async def update_firmware(self,
filename: str,
loop: asyncio.AbstractEventLoop,
modeset: bool) -> str:
msg = await self._smoothie_driver.update_firmware(
filename, loop, modeset)
await self.update_fw_version()
return msg
def engaged_axes(self) -> Dict[str, bool]:
return self._smoothie_driver.engaged_axes
def disengage_axes(self, axes: List[str]):
self._smoothie_driver.disengage_axis(''.join(axes))
def set_lights(self, button: Optional[bool], rails: Optional[bool]):
if opentrons.config.IS_ROBOT:
if button is not None:
self.gpio_chardev.set_button_light(blue=button)
if rails is not None:
self.gpio_chardev.set_rail_lights(rails)
def get_lights(self) -> Dict[str, bool]:
if not opentrons.config.IS_ROBOT:
return {}
return {'button': self.gpio_chardev.get_button_light()[2],
'rails': self.gpio_chardev.get_rail_lights()}
def pause(self):
self._smoothie_driver.pause()
def resume(self):
self._smoothie_driver.resume()
def halt(self):
self._smoothie_driver.kill()
def hard_halt(self):
self._smoothie_driver.hard_halt()
def probe(self, axis: str, distance: float) -> Dict[str, float]:
""" Run a probe and return the new position dict
"""
return self._smoothie_driver.probe_axis(axis, distance)
def clean_up(self):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
return
if hasattr(self, '_module_watcher'):
if loop.is_running() and self._module_watcher:
self._module_watcher.close()
if hasattr(self, 'gpio_chardev'):
try:
if not loop.is_closed():
self.gpio_chardev.stop_door_switch_watcher(loop)
except RuntimeError:
pass
def configure_mount(self, mount: Mount, config: InstrumentHardwareConfigs):
mount_axis = Axis.by_mount(mount)
plunger_axis = Axis.of_plunger(mount)
self._smoothie_driver.update_steps_per_mm(
{plunger_axis.name: config['steps_per_mm']})
self._smoothie_driver.update_pipette_config(
mount_axis.name, {'home': config['home_pos']})
self._smoothie_driver.update_pipette_config(
plunger_axis.name, {'max_travel': config['max_travel']})
self._smoothie_driver.set_dwelling_current(
{plunger_axis.name: config['idle_current']})
ms = config['splits']
if ms:
self._smoothie_driver.configure_splits_for(
{plunger_axis.name: ms})
def __del__(self):
self.clean_up()
|
|
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_source, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
toplevel = run_command([GIT, "rev-parse", "--show-toplevel"],
hide_stderr=True)
root = (toplevel.strip() if toplevel else os.path.dirname(here))
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "v"
parentdir_prefix = "django_url_counter-"
versionfile_source = "django_url_counter/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return rep_by_pep440(ver)
def git2pep440(ver_str):
try:
tag, commits, _ = ver_str.split('-', 2)
return ".post".join([tag, commits])
except ValueError:
return ver_str
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
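# Hedged worked example of the PEP 440 rewrite above (values are invented):
# a "git describe" style version such as "1.2-14-g1234abc" becomes
# "1.2.post14", while an exact tag like "1.2" is returned unchanged.
def _example_git2pep440():
    assert git2pep440("1.2-14-g1234abc") == "1.2.post14"
    assert git2pep440("1.2") == "1.2"
    return rep_by_pep440({"version": "1.2-14-g1234abc", "full": "1234abc"})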
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import decimal
import datetime
import pyarrow as pa
from pyarrow import fs
from pyarrow.tests import util
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not orc'
pytestmark = pytest.mark.orc
try:
from pandas.testing import assert_frame_equal
import pandas as pd
except ImportError:
pass
@pytest.fixture(scope="module")
def datadir(base_datadir):
return base_datadir / "orc"
def fix_example_values(actual_cols, expected_cols):
"""
Fix type of expected values (as read from JSON) according to
actual ORC datatype.
"""
for name in expected_cols:
expected = expected_cols[name]
actual = actual_cols[name]
if (name == "map" and
[d.keys() == {'key', 'value'} for m in expected for d in m]):
# convert [{'key': k, 'value': v}, ...] to [(k, v), ...]
for i, m in enumerate(expected):
expected_cols[name][i] = [(d['key'], d['value']) for d in m]
continue
typ = actual[0].__class__
if issubclass(typ, datetime.datetime):
# timestamp fields are represented as strings in JSON files
expected = pd.to_datetime(expected)
elif issubclass(typ, datetime.date):
# date fields are represented as strings in JSON files
expected = expected.dt.date
elif typ is decimal.Decimal:
converted_decimals = [None] * len(expected)
# decimal fields are represented as reals in JSON files
for i, (d, v) in enumerate(zip(actual, expected)):
if not pd.isnull(v):
exp = d.as_tuple().exponent
factor = 10 ** -exp
converted_decimals[i] = (
decimal.Decimal(round(v * factor)).scaleb(exp))
expected = pd.Series(converted_decimals)
expected_cols[name] = expected
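# Hedged worked example of the decimal branch above (values are invented):
# if the ORC column holds Decimal('12.34') (exponent -2) and the JSON file
# stored the real 12.34, then round(12.34 * 10**2) == 1234 and
# Decimal(1234).scaleb(-2) == Decimal('12.34'), recovering the exact value.
def _example_decimal_fixup():
    actual = decimal.Decimal('12.34')
    expected_real = 12.34
    exp = actual.as_tuple().exponent           # -2
    factor = 10 ** -exp                        # 100
    return decimal.Decimal(round(expected_real * factor)).scaleb(exp)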
def check_example_values(orc_df, expected_df, start=None, stop=None):
if start is not None or stop is not None:
expected_df = expected_df[start:stop].reset_index(drop=True)
assert_frame_equal(orc_df, expected_df, check_dtype=False)
def check_example_file(orc_path, expected_df, need_fix=False):
"""
Check a ORC file against the expected columns dictionary.
"""
from pyarrow import orc
orc_file = orc.ORCFile(orc_path)
# Exercise ORCFile.read()
table = orc_file.read()
assert isinstance(table, pa.Table)
table.validate()
    # This workaround is needed because of ARROW-3080
orc_df = pd.DataFrame(table.to_pydict())
assert set(expected_df.columns) == set(orc_df.columns)
# reorder columns if necessary
if not orc_df.columns.equals(expected_df.columns):
expected_df = expected_df.reindex(columns=orc_df.columns)
if need_fix:
fix_example_values(orc_df, expected_df)
check_example_values(orc_df, expected_df)
# Exercise ORCFile.read_stripe()
json_pos = 0
for i in range(orc_file.nstripes):
batch = orc_file.read_stripe(i)
check_example_values(pd.DataFrame(batch.to_pydict()),
expected_df,
start=json_pos,
stop=json_pos + len(batch))
json_pos += len(batch)
assert json_pos == orc_file.nrows
@pytest.mark.pandas
@pytest.mark.parametrize('filename', [
'TestOrcFile.test1.orc',
'TestOrcFile.testDate1900.orc',
'decimal.orc'
])
def test_example_using_json(filename, datadir):
"""
Check a ORC file example against the equivalent JSON file, as given
in the Apache ORC repository (the JSON file has one JSON object per
line, corresponding to one row in the ORC file).
"""
# Read JSON file
path = datadir / filename
table = pd.read_json(str(path.with_suffix('.jsn.gz')), lines=True)
check_example_file(path, table, need_fix=True)
def test_orcfile_empty(datadir):
from pyarrow import orc
table = orc.ORCFile(datadir / "TestOrcFile.emptyFile.orc").read()
assert table.num_rows == 0
expected_schema = pa.schema([
("boolean1", pa.bool_()),
("byte1", pa.int8()),
("short1", pa.int16()),
("int1", pa.int32()),
("long1", pa.int64()),
("float1", pa.float32()),
("double1", pa.float64()),
("bytes1", pa.binary()),
("string1", pa.string()),
("middle", pa.struct(
[("list", pa.list_(
pa.struct([("int1", pa.int32()),
("string1", pa.string())])))
])),
("list", pa.list_(
pa.struct([("int1", pa.int32()),
("string1", pa.string())])
)),
("map", pa.map_(pa.string(),
pa.struct([("int1", pa.int32()),
("string1", pa.string())])
)),
])
assert table.schema == expected_schema
def test_filesystem_uri(tmpdir):
from pyarrow import orc
table = pa.table({"a": [1, 2, 3]})
directory = tmpdir / "data_dir"
directory.mkdir()
path = directory / "data.orc"
orc.write_table(table, str(path))
# filesystem object
result = orc.read_table(path, filesystem=fs.LocalFileSystem())
assert result.equals(table)
# filesystem URI
result = orc.read_table(
"data_dir/data.orc", filesystem=util._filesystem_uri(tmpdir))
assert result.equals(table)
# use the path only
result = orc.read_table(
util._filesystem_uri(path))
assert result.equals(table)
def test_orcfile_readwrite(tmpdir):
from pyarrow import orc
a = pa.array([1, None, 3, None])
b = pa.array([None, "Arrow", None, "ORC"])
table = pa.table({"int64": a, "utf8": b})
file = tmpdir.join("test.orc")
orc.write_table(table, file)
output_table = orc.read_table(file)
assert table.equals(output_table)
output_table = orc.read_table(file, [])
assert 4 == output_table.num_rows
assert 0 == output_table.num_columns
output_table = orc.read_table(file, columns=["int64"])
assert 4 == output_table.num_rows
assert 1 == output_table.num_columns
def test_bytesio_readwrite():
from pyarrow import orc
from io import BytesIO
buf = BytesIO()
a = pa.array([1, None, 3, None])
b = pa.array([None, "Arrow", None, "ORC"])
table = pa.table({"int64": a, "utf8": b})
orc.write_table(table, buf)
buf.seek(0)
orc_file = orc.ORCFile(buf)
output_table = orc_file.read()
assert table.equals(output_table)
def test_buffer_readwrite():
from pyarrow import orc
buffer_output_stream = pa.BufferOutputStream()
a = pa.array([1, None, 3, None])
b = pa.array([None, "Arrow", None, "ORC"])
table = pa.table({"int64": a, "utf8": b})
orc.write_table(table, buffer_output_stream)
buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
orc_file = orc.ORCFile(buffer_reader)
output_table = orc_file.read()
assert table.equals(output_table)
# Check for default WriteOptions
assert orc_file.compression == 'UNCOMPRESSED'
assert orc_file.file_version == '0.12'
assert orc_file.row_index_stride == 10000
assert orc_file.compression_size == 65536
# deprecated keyword order
buffer_output_stream = pa.BufferOutputStream()
with pytest.warns(FutureWarning):
orc.write_table(buffer_output_stream, table)
buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
orc_file = orc.ORCFile(buffer_reader)
output_table = orc_file.read()
assert table.equals(output_table)
# Check for default WriteOptions
assert orc_file.compression == 'UNCOMPRESSED'
assert orc_file.file_version == '0.12'
assert orc_file.row_index_stride == 10000
assert orc_file.compression_size == 65536
@pytest.mark.snappy
def test_buffer_readwrite_with_writeoptions():
from pyarrow import orc
buffer_output_stream = pa.BufferOutputStream()
a = pa.array([1, None, 3, None])
b = pa.array([None, "Arrow", None, "ORC"])
table = pa.table({"int64": a, "utf8": b})
orc.write_table(
table,
buffer_output_stream,
compression='snappy',
file_version='0.11',
row_index_stride=5000,
compression_block_size=32768,
)
buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
orc_file = orc.ORCFile(buffer_reader)
output_table = orc_file.read()
assert table.equals(output_table)
# Check for modified WriteOptions
assert orc_file.compression == 'SNAPPY'
assert orc_file.file_version == '0.11'
assert orc_file.row_index_stride == 5000
assert orc_file.compression_size == 32768
# deprecated keyword order
buffer_output_stream = pa.BufferOutputStream()
with pytest.warns(FutureWarning):
orc.write_table(
buffer_output_stream,
table,
compression='uncompressed',
file_version='0.11',
row_index_stride=20000,
compression_block_size=16384,
)
buffer_reader = pa.BufferReader(buffer_output_stream.getvalue())
orc_file = orc.ORCFile(buffer_reader)
output_table = orc_file.read()
assert table.equals(output_table)
# Check for default WriteOptions
assert orc_file.compression == 'UNCOMPRESSED'
assert orc_file.file_version == '0.11'
assert orc_file.row_index_stride == 20000
assert orc_file.compression_size == 16384
def test_buffer_readwrite_with_bad_writeoptions():
from pyarrow import orc
buffer_output_stream = pa.BufferOutputStream()
a = pa.array([1, None, 3, None])
table = pa.table({"int64": a})
# batch_size must be a positive integer
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
batch_size=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
batch_size=-100,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
batch_size=1024.23,
)
# file_version must be 0.11 or 0.12
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
file_version=0.13,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
file_version='1.1',
)
# stripe_size must be a positive integer
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
stripe_size=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
stripe_size=-400,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
stripe_size=4096.73,
)
# compression must be among the given options
with pytest.raises(TypeError):
orc.write_table(
table,
buffer_output_stream,
compression=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression='none',
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression='zlid',
)
# compression_block_size must be a positive integer
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression_block_size=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression_block_size=-200,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression_block_size=1096.73,
)
# compression_strategy must be among the given options
with pytest.raises(TypeError):
orc.write_table(
table,
buffer_output_stream,
compression_strategy=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression_strategy='no',
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
compression_strategy='large',
)
# row_index_stride must be a positive integer
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
row_index_stride=0,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
row_index_stride=-800,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
row_index_stride=3096.29,
)
# padding_tolerance must be possible to cast to float
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
padding_tolerance='cat',
)
# dictionary_key_size_threshold must be possible to cast to
# float between 0.0 and 1.0
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
dictionary_key_size_threshold='arrow',
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
dictionary_key_size_threshold=1.2,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
dictionary_key_size_threshold=-3.2,
)
# bloom_filter_columns must be convertible to a list containing
# nonnegative integers
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_columns="string",
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_columns=[0, 1.4],
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_columns={0, 2, -1},
)
# bloom_filter_fpp must be convertible to a float between 0.0 and 1.0
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_fpp='arrow',
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_fpp=1.1,
)
with pytest.raises(ValueError):
orc.write_table(
table,
buffer_output_stream,
bloom_filter_fpp=-0.1,
)
def test_column_selection(tempdir):
from pyarrow import orc
# create a table with nested types
inner = pa.field('inner', pa.int64())
middle = pa.field('middle', pa.struct([inner]))
fields = [
pa.field('basic', pa.int32()),
pa.field(
'list', pa.list_(pa.field('item', pa.int32()))
),
pa.field(
'struct', pa.struct([middle, pa.field('inner2', pa.int64())])
),
pa.field(
'list-struct', pa.list_(pa.field(
'item', pa.struct([
pa.field('inner1', pa.int64()),
pa.field('inner2', pa.int64())
])
))
),
pa.field('basic2', pa.int64()),
]
arrs = [
[0], [[1, 2]], [{"middle": {"inner": 3}, "inner2": 4}],
[[{"inner1": 5, "inner2": 6}, {"inner1": 7, "inner2": 8}]], [9]]
table = pa.table(arrs, schema=pa.schema(fields))
path = str(tempdir / 'test.orc')
orc.write_table(table, path)
orc_file = orc.ORCFile(path)
# default selecting all columns
result1 = orc_file.read()
assert result1.equals(table)
# selecting with columns names
result2 = orc_file.read(columns=["basic", "basic2"])
assert result2.equals(table.select(["basic", "basic2"]))
result3 = orc_file.read(columns=["list", "struct", "basic2"])
assert result3.equals(table.select(["list", "struct", "basic2"]))
# using dotted paths
result4 = orc_file.read(columns=["struct.middle.inner"])
expected4 = pa.table({"struct": [{"middle": {"inner": 3}}]})
assert result4.equals(expected4)
result5 = orc_file.read(columns=["struct.inner2"])
expected5 = pa.table({"struct": [{"inner2": 4}]})
assert result5.equals(expected5)
result6 = orc_file.read(
columns=["list", "struct.middle.inner", "struct.inner2"]
)
assert result6.equals(table.select(["list", "struct"]))
result7 = orc_file.read(columns=["list-struct.inner1"])
expected7 = pa.table({"list-struct": [[{"inner1": 5}, {"inner1": 7}]]})
assert result7.equals(expected7)
# selecting with (Arrow-based) field indices
result2 = orc_file.read(columns=[0, 4])
assert result2.equals(table.select(["basic", "basic2"]))
result3 = orc_file.read(columns=[1, 2, 3])
assert result3.equals(table.select(["list", "struct", "list-struct"]))
# error on non-existing name or index
with pytest.raises(IOError):
# liborc returns ParseError, which gets translated into IOError
# instead of ValueError
orc_file.read(columns=["wrong"])
with pytest.raises(ValueError):
orc_file.read(columns=[5])
|
|
"""
File: continuous.py
Author: Come Bertrand
Email: bertrand.cosme@gmail.com
Github: https://github.com/ComeBertrand
Description: Classical continuous functions for performance evaluation of
metaheuristics. All these functions were taken from the following website:
https://www.sfu.ca/~ssurjano/optimization.html
"""
import numpy as np
from ...models import Problem
from ...common.representation import RealEncoding, Boundaries
from ...common.fitness import Objective
from ...operators.neighborhood import NeighborhoodOperator, move_distance_continuous, ContinuousLogMoveRange
class ContinuousProblem(Problem):
"""Problems that are defined by a continuous function.
# TODO: Do it in a more abstract way and move it in abstract
Args:
n_dim (int): Number of dimensions.
min_vals (np.array): Minimum values for each dimension.
max_vals (np.array): Maximum values for each dimension.
move_range (MoveRange): Range of the move step.
known_min (float): Minimum of the continuous function. None means that
the minimum is not known.
"""
def __init__(self, n_dim, min_vals, max_vals, move_range, known_min):
nb_neighbors = n_dim * 100 # TODO: shall be an argument of the object
neighborhood = NeighborhoodOperator(move_distance_continuous, move_range, nb_neighbors)
boundaries = Boundaries(min_vals, max_vals, np.float)
encoding = RealEncoding(boundaries)
objective = Objective(self._eval_func)
super().__init__(objective, encoding, neighborhood=neighborhood, known_min=known_min)
def _eval_func(self, solution):
"""Actual evaluation of a solution by the continuous function.
Args:
solution (Solution): Solution to be evaluated.
Returns:
float: function value of the solution.
"""
raise NotImplementedError("Abstract Class")
# --------------------------------------------------------------------------- #
# Functions with many local minima #
# --------------------------------------------------------------------------- #
class Ackleys(ContinuousProblem):
"""Ackley's function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-32.768] * n_dim, np.float)
max_vals = np.array([32.768] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
part1 = -0.2 * np.sqrt(1/n * np.sum(solution * solution))
part2 = 1/n * np.sum(np.cos(2 * np.pi * solution))
return 20 - 20 * np.exp(part1) + np.e - np.exp(part2)
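# Hedged sanity check for the Ackley formula above: at the origin the
# distance term and the cosine term both vanish, so the value collapses to
# 20 - 20*exp(0) + e - exp(1) == 0, which matches known_min. Helper name and
# dimension are illustrative only.
def _example_ackley_at_origin(n_dim=3):
    origin = np.zeros(n_dim)
    part1 = -0.2 * np.sqrt(np.sum(origin * origin) / n_dim)
    part2 = np.sum(np.cos(2 * np.pi * origin)) / n_dim
    return 20 - 20 * np.exp(part1) + np.e - np.exp(part2)  # ~0.0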
class Bukin6(ContinuousProblem):
"""Bukin funtion N.6."""
def __init__(self):
n_dim = 2
min_vals = np.array([-15.0, -3.0], np.float)
max_vals = np.array([-5.0, 3.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(solution[1] - 0.01 * solution[0] * solution[0])
part2 = np.abs(solution[0] + 10)
return 100 * np.sqrt(part1) + 0.01 * part2
class CrossInTray(ContinuousProblem):
"""Cross-in-tray function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -2.06261
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(100 - np.sqrt(np.sum(solution * solution)) / np.pi)
part2 = np.sin(solution[0]) * np.sin(solution[1])
final = np.abs(part2 * np.exp(part1)) + 1.0
return -0.0001 * np.power(final, 0.1)
class DropWave(ContinuousProblem):
"""Drop-Wave function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12, -5.12], np.float)
max_vals = np.array([5.12, 5.12], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -1.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sol_sq = np.sum(solution * solution)
part1 = 1.0 + np.cos(12 * np.sqrt(sum_sol_sq))
part2 = 0.5 * sum_sol_sq + 2.0
return -1.0 * (part1 / part2)
class Eggholder(ContinuousProblem):
"""Eggholder function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-512, -512], np.float)
max_vals = np.array([512, 512], np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = -959.6407
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(np.sqrt(np.abs(solution[1] + (solution[0]/2.) + 47)))
part2 = np.sin(np.sqrt(np.abs(solution[0] - (solution[1] + 47))))
        return -1.0 * (solution[1] + 47) * part1 - solution[0] * part2
class GramacyLee(ContinuousProblem):
"""Gramacy & Lee function."""
def __init__(self):
n_dim = 1
min_vals = np.array([0.5], np.float)
max_vals = np.array([2.5], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(10 * np.pi * solution[0]) / (2 * solution[0])
part2 = np.power(solution[0] - 1.0, 4)
return part1 + part2
class Griewank(ContinuousProblem):
"""Griewank function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-600] * n_dim, np.float)
max_vals = np.array([600] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sum((solution * solution) / 4000.0)
        sqrt = np.array([np.sqrt(i) for i in range(1, len(solution) + 1)], np.float)
part2 = np.prod(np.cos(solution / sqrt))
return part1 - 1.0 * part2 + 1.0
class HolderTable(ContinuousProblem):
"""Holder Table function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -19.2085
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sqrt_sq = np.sqrt(np.sum(solution * solution))
part1 = np.exp(np.abs(1.0 - (sum_sqrt_sq / np.pi)))
part2 = np.sin(solution[0]) * np.cos(solution[1])
return -1.0 * np.abs(part2 * part1)
class Langermann(ContinuousProblem):
"""Langermann function."""
def __init__(self):
n_dim = 2
min_vals = np.array([0.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = np.array([[3, 5],
[5, 2],
[2, 1],
[1, 4],
[7, 9]], np.float)
c = np.array([1, 2, 5, 2, 3], np.float)
part_sum = np.sum((solution - A) * (solution - A), axis=1)
part1 = np.cos(np.pi * part_sum)
part2 = np.exp((-1.0 / np.pi) * part_sum)
return np.sum(c * part1 * part2)
class Levy(ContinuousProblem):
"""Levy function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
w = 1.0 + (solution - 1.0) / 4.0
part_w = w[:-1]
part1 = np.power(np.sin(np.pi * w[0]), 2)
part2 = np.sum((part_w - 1) * (part_w - 1) *
(1 + 10 * np.power(np.sin(np.pi * part_w + 1), 2)))
part3 = ((w[-1] - 1) * (w[-1] - 1) * (1 + np.power(np.sin(2 * np.pi *
w[-1]), 2)))
return part1 + part2 + part3
class Levy13(ContinuousProblem):
"""Levy function N.13."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
arg1, arg2 = solution
part1 = np.power(np.sin(3 * np.pi * arg1), 2)
part2 = (arg1 - 1) * (arg1 - 1) * (1 + np.power(np.sin(3 * np.pi * arg2), 2))
part3 = (arg2 - 1) * (arg2 - 1) * (1 + np.power(np.sin(2 * np.pi * arg2), 2))
return part1 + part2 + part3
class Rastrigin(ContinuousProblem):
"""Rastrigin function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = 10
n = len(solution)
part1 = A * np.cos(2 * np.pi * solution)
part2 = solution * solution
return A*n + np.sum(part2 - part1)
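# Hedged worked example for the Rastrigin sum above: at the origin every
# x_i**2 term is 0 and every cosine term is 1, so A*n + sum(0 - A) == 0; at
# x = (1, 1) the value is 2*10 + 2*(1 - 10*cos(2*pi)) == 2. Helper name and
# sample point are illustrative only.
def _example_rastrigin_at_ones():
    a = 10
    x = np.array([1.0, 1.0])
    return a * len(x) + np.sum(x * x - a * np.cos(2 * np.pi * x))  # == 2.0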
class Schaffer2(ContinuousProblem):
"""Schaffer function N.2."""
def __init__(self):
n_dim = 2
min_vals = np.array([-100.0] * n_dim, np.float)
max_vals = np.array([100.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.power(np.sin((x1 * x1) - (x2 * x2)), 2)
part2 = np.power(1.0 + 0.001 * ((x1 * x1) + (x2 * x2)), 2)
return 0.5 + (part1 - 0.5) / part2
class Schaffer4(ContinuousProblem):
"""Schaffer function N.4."""
def __init__(self):
n_dim = 2
min_vals = np.array([-100.0] * n_dim, np.float)
max_vals = np.array([100.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
        known_min = 0.292579
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.cos(np.sin(np.abs((x1 * x1) - (x2 * x2))))
part2 = np.power(1.0 + 0.001 * ((x1 * x1) + (x2 * x2)), 2)
return 0.5 + (part1 - 0.5) / part2
class Schwefel(ContinuousProblem):
"""Schwefel function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-500] * n_dim, np.float)
max_vals = np.array([500] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 20.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A_constant = 418.9829
n = len(solution)
sq_sol = np.sqrt(np.abs(solution))
return A_constant * n - 1.0 * np.sum(solution * np.sin(sq_sol))
class Shubert(ContinuousProblem):
"""Shubert function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -186.7309
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = np.array(range(1, 6), np.float)
x1 = solution[0]
x2 = solution[1]
        part1 = A * np.cos((A + 1.) * x1 + A)
        part2 = A * np.cos((A + 1.) * x2 + A)
return np.sum(part1) * np.sum(part2)
# --------------------------------------------------------------------------- #
# Functions Bowl-Shaped #
# --------------------------------------------------------------------------- #
class Bohachevsky(ContinuousProblem):
"""Bohachevsky functions.
Args:
num_func (int): 1, 2 or 3. Define which Bohachevsky function is used.
Default is 1.
"""
def __init__(self, num_func=1):
if num_func not in [1, 2, 3]:
raise ValueError("The Bohachevsky can only be of "
"numbers 1, 2 or 3.")
self.num_func = num_func
n_dim = 2
min_vals = np.array([-100] * n_dim, np.float)
max_vals = np.array([100] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
if self.num_func == 1:
return self._eval_func_1(solution)
elif self.num_func == 2:
return self._eval_func_2(solution)
elif self.num_func == 3:
return self._eval_func_3(solution)
def _eval_func_1(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 0.3 * np.cos(3 * np.pi * x1)
part3 = 0.4 * np.cos(4 * np.pi * x2)
return part1 - part2 - part3 + 0.7
def _eval_func_2(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 0.3 * np.cos(3 * np.pi * x1)
part3 = np.cos(4 * np.pi * x2)
return part1 - (part2 * part3) + 0.3
def _eval_func_3(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 3 * np.pi * x1
part3 = 4 * np.pi * x2
return part1 - 0.3 * np.cos(part2 + part3) + 0.3
class Perm0(ContinuousProblem):
"""Perm 0,d,B function.
Args:
n_dim (int): Number of dimensions.
beta (float): Argument of the function.
"""
def __init__(self, n_dim, beta):
self.beta = beta
min_vals = np.array([-1 * n_dim] * n_dim, np.float)
max_vals = np.array([n_dim] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, n_dim/10.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
j = np.array(range(1, n+1), np.float)
s_mat = np.zeros((n, n), np.float)
j_mat = np.zeros((n, n), np.float)
for i in range(n):
s_mat[i] += np.power(solution, i+1)
j_mat[i] += 1 / np.power(j, i+1)
        part1 = np.sum((j + self.beta) * (s_mat - 1.0 * j_mat), axis=1)
return np.sum(np.power(part1, 2))
class RotatedHyperEllipsoid(ContinuousProblem):
"""Rotated Hyper-Ellipsoid function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-65.536] * n_dim, np.float)
max_vals = np.array([65.536] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
s_mat = np.zeros((n, n), np.float)
solsq = solution * solution
prod = solsq.copy()
for i in range(n):
l = np.array(prod[:i+1].copy())
l.resize((n,))
s_mat[i] += l
return np.sum(s_mat)
class Sphere(ContinuousProblem):
"""Sphere function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
return np.sum(solution * solution)
class SumDiffPower(ContinuousProblem):
"""Sum of Different Powers function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-1] * n_dim, np.float)
max_vals = np.array([1] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.001, 0.1)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
powers = np.array(range(2, n+2), np.float)
return np.sum(np.power(np.abs(solution), powers))
class SumSquare(ContinuousProblem):
"""Sum Squares function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
i = np.array(range(1, n+1), np.float)
return np.sum(i * solution * solution)
class Trid(ContinuousProblem):
"""Trid function.
    Global minima are known for dimensions 6 and 10.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
dimsq = n_dim * n_dim
        min_vals = np.array([-1 * dimsq] * n_dim, np.float)
max_vals = np.array([dimsq] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, dimsq/10.)
if n_dim == 6:
known_min = -50.
elif n_dim == 10:
            known_min = -210.
else:
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sum(np.power(solution - 1, 2))
part2 = np.sum(solution[1:] * solution[:-1])
return part1 - part2
# --------------------------------------------------------------------------- #
# Functions Plate-Shaped #
# --------------------------------------------------------------------------- #
class Booth(ContinuousProblem):
"""Booth function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.power(x1 + 2 * x2 - 7.0, 2)
part2 = np.power(2 * x1 + x2 - 5.0, 2)
return part1 + part2
class Matyas(ContinuousProblem):
"""Matyas function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = 0.26 * (x1 * x1 + x2 * x2)
part2 = 0.48 * x1 * x2
return part1 - part2
class McCormick(ContinuousProblem):
"""McCormick function."""
def __init__(self):
n_dim = 2
        min_vals = np.array([-1.5, -3.0], np.float)
max_vals = np.array([4.0, 4.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = -1.9133
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.sin(x1 + x2) + np.power(x1 - x2, 2)
        part2 = -1.5 * x1 + 2.5 * x2 + 1.0
return part1 + part2
class PowerSum(ContinuousProblem):
"""Power Sum function."""
def __init__(self):
n_dim = 4
min_vals = np.array([0] * n_dim, np.float)
max_vals = np.array([n_dim] * n_dim, np.float)
step = n_dim / 10.
move_range = ContinuousLogMoveRange(0.01, step)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
    def _eval_func(self, solution):
        b = np.array([8, 18, 44, 114], np.float)
        n = len(solution)
        s_mat = np.zeros((n, n), np.float)
        for i in range(n):
            # Row i holds solution ** (i + 1) for every component.
            s_mat[i] += np.power(solution, i + 1)
        return np.sum(np.power(np.sum(s_mat, axis=1) - b, 2))
class Zakharov(ContinuousProblem):
"""Zakharov function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
i = np.array(range(1, n+1), np.float)
part1 = np.sum(np.power(solution, 2))
part2 = np.power(np.sum(0.5 * i * solution), 2)
part3 = np.power(np.sum(0.5 * i * solution), 4)
return part1 + part2 + part3
# --------------------------------------------------------------------------- #
# Functions Valley-Shaped #
# --------------------------------------------------------------------------- #
class ThreeHumpCamel(ContinuousProblem):
"""Three-Hump Camel function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.0] * n_dim, np.float)
max_vals = np.array([5.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = 2 * x1 * x1
part2 = 1.05 * np.power(x1, 4)
part3 = np.power(x1, 6) / 6.
part4 = x1 * x2
part5 = np.power(x2, 2)
return part1 - part2 + part3 + part4 + part5
class SixHumpCamel(ContinuousProblem):
"""Six-Hump Camel function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-3.0, -2.0], np.float)
max_vals = np.array([3.0, 2.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = -1.0316
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
        part1 = ((4.0 - 2.1 * np.power(x1, 2) + np.power(x1, 4) / 3.0) *
                 np.power(x1, 2))
        part2 = x1 * x2
        part3 = (-4.0 + 4.0 * np.power(x2, 2)) * np.power(x2, 2)
        return part1 + part2 + part3
class DixonPrice(ContinuousProblem):
"""Dixon-Price function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-10.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
x1 = solution[0]
s1 = solution[1:]
s2 = solution[:-1]
i = np.array(range(2, n+1), np.float)
part1 = np.power(x1 - 1.0, 2)
part2 = np.sum(i * np.power(2 * s1 * s1 - s2, 2))
return part1 + part2
class Rosenbrock(ContinuousProblem):
"""Rosenbrock function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
s1 = solution[1:]
s2 = solution[:-1]
part1 = s1 - (s2 * s2)
part2 = s2 - 1.
return np.sum((100 * part1 * part1) + (part2 * part2))
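# Hedged sanity check for the vectorised Rosenbrock above: at the global
# minimum (1, 1, ..., 1) both the (x_{i+1} - x_i**2) and the (x_i - 1) terms
# are zero, so the sum is exactly 0, matching known_min. Helper name and
# dimension are illustrative only.
def _example_rosenbrock_at_ones(n_dim=4):
    ones = np.ones(n_dim)
    s1, s2 = ones[1:], ones[:-1]
    return np.sum(100 * (s1 - s2 * s2) ** 2 + (s2 - 1.0) ** 2)  # == 0.0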
# --------------------------------------------------------------------------- #
# Functions with Steep Ridges/Drops #
# --------------------------------------------------------------------------- #
class DeJong5(ContinuousProblem):
"""De Jong function N.5."""
def __init__(self):
n_dim = 2
min_vals = np.array([-65.536] * n_dim, np.float)
max_vals = np.array([65.536] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 5.)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = np.array([[-32, -16, 0, 16, 32] * 5,
[-32] * 5 + [-16] * 5 + [0] * 5 + [16] * 5 + [32] * 5],
np.float)
x1 = solution[0]
x2 = solution[1]
i = np.array(range(1, 26), np.float)
part1 = np.sum(np.power(i + np.power(x1 - A[0], 6) +
np.power(x2 - A[1], 6), -1))
return np.power(0.002 + part1, -1)
class Easom(ContinuousProblem):
"""Easom function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-100] * n_dim, np.float)
max_vals = np.array([100] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.)
known_min = -1.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = -1 * np.power(x1 - np.pi, 2) - 1 * np.power(x2 - np.pi, 2)
part2 = -1 * np.cos(x1) * np.cos(x2)
return part2 * np.exp(part1)
class Michalewicz(ContinuousProblem):
"""Michalewicz function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([0.0] * n_dim, np.float)
max_vals = np.array([np.pi] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
if n_dim == 2:
known_min = -1.8013
elif n_dim == 5:
known_min = -4.687658
elif n_dim == 10:
known_min = -9.66015
else:
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
m = 10
i = np.array(range(1, n+1), np.float)
part1 = np.sin(solution)
part2 = np.power(np.sin(i * solution * solution * (1 / np.pi)), 2 * m)
return -1.0 * np.sum(part1 * part2)
# --------------------------------------------------------------------------- #
# Others #
# --------------------------------------------------------------------------- #
class Beale(ContinuousProblem):
"""Beale function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-4.5] * n_dim, np.float)
max_vals = np.array([4.5] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.power(1.5 - x1 + x1 * x2, 2)
part2 = np.power(2.25 - x1 + x1 * x2 * x2, 2)
part3 = np.power(2.625 - x1 + x1 * x2 * x2 * x2, 2)
return part1 + part2 + part3
class Branin(ContinuousProblem):
"""Branin function."""
def __init__(self):
n_dim = 2
        min_vals = np.array([-5.0, 0.0], np.float)
        max_vals = np.array([10.0, 15.0], np.float)
        move_range = ContinuousLogMoveRange(0.01, 0.1)
        known_min = 0.397887
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
a = 1.0
b = 5.1 / (4 * np.pi * np.pi)
c = 5 / np.pi
r = 6
s = 10
t = 1 / (8 * np.pi)
x1 = solution[0]
x2 = solution[1]
part1 = x2 - b * x1 * x1 + c * x1 - r
part2 = s * (1 - t) * np.cos(x1) + s
return a * np.power(part1, 2) + part2
class Colville(ContinuousProblem):
"""Colville function."""
def __init__(self):
n_dim = 4
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
x3 = solution[2]
x4 = solution[3]
part1 = 100 * np.power(x1 * x1 - x2, 2)
part2 = np.power(x1 - 1, 2)
part3 = np.power(x3 - 1, 2)
part4 = 90 * np.power(x3 * x3 - x4, 2)
part5 = 10.1 * (np.power(x2 - 1, 2) + np.power(x4 - 1, 2))
part6 = 19.8 * (x2 - 1) * (x4 - 1)
return part1 + part2 + part3 + part4 + part5 + part6
class Forrester(ContinuousProblem):
"""Forrester et Al. function."""
def __init__(self):
n_dim = 1
min_vals = np.array([0] * n_dim, np.float)
max_vals = np.array([1] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x = solution[0]
part1 = np.power(6 * x - 2, 2)
part2 = np.sin(12 * x - 4)
return part1 * part2
class GoldsteinPrice(ContinuousProblem):
"""Goldstein-Price function.
Args:
rescaled_form (bool): True if the rescaled form of the Goldstein-Price
function is to be used. Default is False.
"""
def __init__(self, rescaled_form=False):
self.rescaled_form = rescaled_form
n_dim = 2
min_vals = np.array([-2] * n_dim, np.float)
max_vals = np.array([2] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
if self.rescaled_form:
known_min = None
else:
known_min = 3.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
if self.rescaled_form:
return self._eval_rescaled(solution)
else:
return self._eval_not_rescaled(solution)
def _eval_rescaled(self, solution):
n_sol = 4 * solution - 2
part1 = self._eval_not_rescaled(n_sol)
return (1/2.427) * (np.log(part1) - 8.693)
def _eval_not_rescaled(self, solution):
x1 = solution[0]
x1_2 = x1 * x1
x2 = solution[1]
x2_2 = x2 * x2
part1 = (1 + np.power(x1 + x2 + 1, 2) * (19 - 14 * x1 + 3 * x1_2 - 14 *
x2 + 6 * x1 * x2 + 3 * x2_2))
part2 = (30 + np.power(2 * x1 - 3 * x2, 2) * (18 - 32 * x1 + 12 * x1_2
+ 48 * x2 - 36 * x1 * x2
+ 27 * x2_2))
return part1 * part2
class Hartmann3D(ContinuousProblem):
"""Hartmann 3-Dimensional function."""
def __init__(self):
n_dim = 3
min_vals = np.array([0] * n_dim, np.float)
max_vals = np.array([1] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
known_min = -3.86278
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
alpha = np.array([1.0, 1.2, 3.0, 3.2], np.float)
A = np.array([[3.0, 10, 30],
[0.1, 10, 35],
[3.0, 10, 30],
[0.1, 10, 35]], np.float)
P = 0.0001 * np.array([[3689, 1170, 2673],
[4699, 4387, 7470],
[1091, 8732, 5547],
[381, 5743, 8828]], np.float)
        part1 = -1 * np.sum(A * np.power(solution - P, 2), axis=1)
return (-1 * np.sum(alpha * np.exp(part1)))
class Hartmann6D(ContinuousProblem):
"""Hartmann 6-Dimensional function.
Args:
rescaled_form (bool): True if the rescaled form of the function is used
False otherwise. Default is False.
"""
def __init__(self, rescaled_form=False):
self.rescaled_form = rescaled_form
n_dim = 6
min_vals = np.array([0] * n_dim, np.float)
max_vals = np.array([1] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
if self.rescaled_form:
known_min = None
else:
known_min = -3.32237
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
alpha = np.array([1.0, 1.2, 3.0, 3.2], np.float)
A = np.array([[10, 3.0, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14]], np.float)
P = 0.0001 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381]], np.float)
        inner = -1 * np.sum(A * np.power(solution - P, 2), axis=1)
        outer = np.sum(alpha * np.exp(inner))
        if self.rescaled_form:
            return (-1 / 1.94) * (2.58 + outer)
        else:
            return -1 * outer
class Perm(ContinuousProblem):
"""Perm d,B function.
Args:
n_dim (int): Number of dimensions.
beta (float): Argument of the function.
"""
def __init__(self, n_dim, beta):
self.beta = beta
min_vals = np.array([-1 * n_dim] * n_dim, np.float)
max_vals = np.array([n_dim] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, n_dim/10.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
    def _eval_func(self, solution):
        n = len(solution)
        j = np.array(range(1, n+1), np.float)
        s_mat = np.zeros((n, n), np.float)
        j_mat2 = np.zeros((n, n), np.float)
        for i in range(n):
            # Row i holds (x_j / j) ** (i + 1) - 1 and j ** (i + 1).
            s_mat[i] += np.power(solution / j, i+1) - 1.0
            j_mat2[i] += np.power(j, i+1)
        part1 = np.sum((j_mat2 + self.beta) * s_mat, axis=1)
        return np.sum(np.power(part1, 2))
class Powell(ContinuousProblem):
"""Powell function.
Args:
n_dim (int): Number of dimensions. Must be a multiple of 4.
"""
def __init__(self, n_dim):
if n_dim % 4 != 0:
raise ValueError("The number of dimensions must be a "
"multiple of 4")
min_vals = np.array([-4] * n_dim, np.float)
max_vals = np.array([5] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 0.1)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution) // 4
n_sol = solution.copy()
n_sol.resize((n, 4))
value = 0.0
for i in range(n):
x0 = n_sol[i][0]
x1 = n_sol[i][1]
x2 = n_sol[i][2]
x3 = n_sol[i][3]
value += np.power(x0 + 10 * x1, 2)
value += 5 * np.power(x2 - x0, 2)
value += np.power(x1 - 2 * x2, 4)
value += 10 * np.power(x0 - x3, 4)
return value
class Shekel(ContinuousProblem):
"""Shekel function."""
def __init__(self):
n_dim = 4
min_vals = np.array([0] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -10.5364
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
m = 10
beta = 1/10 * np.array([1, 2, 2, 4, 4, 6, 3, 7, 5, 5], np.float)
C = np.array([[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
[4, 1, 8, 6, 7, 9, 3, 1, 2, 3],
[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],
[4, 1, 8, 6, 7, 9, 3, 1, 2, 3]], np.float)
outer = 0.0
for i in range(m):
            inner = beta[i]
            for j in range(4):
                inner += np.power(solution[j] - C[j][i], 2)
            outer += np.power(inner, -1)
return -1 * outer
class StyblinskiTang(ContinuousProblem):
"""Styblinski-Tang function.
Args:
        n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5] * n_dim, np.float)
max_vals = np.array([5] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
        known_min = -39.16599 * n_dim
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
s_4 = np.power(solution, 4)
s_2 = np.power(solution, 2)
return (1/2) * np.sum(s_4 - 16 * s_2 + 5 * solution)
|
|
"""
Implementations of different cartridge types.
"""
class PBCartridge(object):
MAXBANKS = 8
BANKSIZE = 0x0400
def __init__(self, file_name):
self.max_banks = PBCartridge.MAXBANKS
self.bank_size = PBCartridge.BANKSIZE
self._slice = [0]*4
self._slice[0] = 4
self._slice[1] = 5
self._slice[2] = 6
self._slice[3] = 7
self.num_banks = 0
self.current_bank = 0
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
state['slices'] = list(self._slice)
return state
def set_save_state(self, state):
self.current_bank = state['current_bank']
self._file_name = state['file_name']
self._slice = list(state['slices'])
def get_absolute_address(self, address):
absolute = self.bank_size * self._slice[(address & 0xC00) >> 10] + (address & 0x3FF)
return absolute
def write(self, address, data):
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
self._slice[0] = address & 0x7
elif 0xFE8 == (address & 0xFF8):
self._slice[1] = address & 0x7
elif 0xFF0 == (address & 0xFF8):
self._slice[2] = address & 0x7
def read(self, address):
"""
0xFF6 == address: Last bank - 3
0xFF7 == address: Last bank - 2
0xFF8 == address: Last bank - 1
0xFF9 == address: Last bank
"""
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
self._slice[0] = address & 0x7
elif 0xFE8 == (address & 0xFF8):
self._slice[1] = address & 0x7
elif 0xFF0 == (address & 0xFF8):
self._slice[2] = address & 0x7
return self.cartridge_banks[self._slice[(address & 0xC00) >> 10]][address & 0x3FF]
def _load_cartridge(self, filename):
bytes_read = 0
total_bytes_read = 0
self.max_cartridge = [[]] * self.max_banks
print("Opening: ", filename)
with open(filename, 'rb') as rom_file:
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if (bytes_read != 0):
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
if (bytes_read > 0) and (bytes_read < self.bank_size):
print("Warning: Short Cartridge")
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("PBCartridge read:")
print(" banks =", self.num_banks)
print(" bytes =", total_bytes_read)
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
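# Hedged illustration of the slice arithmetic used by PBCartridge above
# (the default slice table [4, 5, 6, 7] comes from __init__): bits 10-11 of
# the address pick the slice, the low 10 bits are the offset inside it.
# Helper name and sample address are illustrative only.
def _example_pb_slice_decode(address=0x0800, slices=(4, 5, 6, 7)):
    slice_index = (address & 0xC00) >> 10                        # 2 for 0x0800
    offset = address & 0x3FF                                     # 0x000
    return PBCartridge.BANKSIZE * slices[slice_index] + offset   # 0x1800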
class MNetworkCartridge(object):
MAXBANKS = 8
BANKSIZE = 0x0800
RAMSIZE = 0x0800
def __init__(self, file_name):
self.max_banks = MNetworkCartridge.MAXBANKS
self.bank_size = MNetworkCartridge.BANKSIZE
self.ram_size = MNetworkCartridge.RAMSIZE
self.num_banks = 0
self.bank_select = 0
self.ram_select = 0
self.ram = []
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['ram'] = list(self.ram)
state['current_bank'] = self.current_bank
state['ram_select'] = self.ram_select
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.ram = list(state['ram'])
self.current_bank = state['current_bank']
self.ram_select = state['ram_select']
self._file_name = state['file_name']
def get_absolute_address(self, address):
bank = self.bank_select
if ((address & 0xF00) >= 0xA00):
bank = 7
return bank * self.bank_size + (address & 0x7FF)
def write(self, address, data):
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
# Bank select 0 to 7
self.bank_select = address & 0x7
elif 0xFE8 == (address & 0xFF8):
            # 256-byte RAM page select (0-3).
self.ram_select = address & 0x3
if (self.bank_select == 7 and 0x000 == (address & 0x800)):
self.ram[address & 0x3FF] = data
elif 0x800 == (address & 0xF00):
            # Selectable 256-byte RAM page, written at 0x1800-0x18FF.
self.ram[(address & 0x7FF) | 0x400 | (self.ram_select << 8)] = data
else:
print("Invalid write address %x"%(address))
def read(self, address):
address = address & 0xFFF
if (0xFE0 == (address & 0xFF8)):
self.bank_select = address & 0x7
elif (0xFE8 == (address & 0xFF8)):
self.ram_select = address & 0x3
if ((self.bank_select == 7) and (0x400 == (address & 0xC00))):
# Return reads from ram.
byte = self.ram[address & 0x3FF]
elif (0x000 == (address & 0x800)):
# Return cartridge select.
byte = self.cartridge_banks[self.bank_select][address & 0x7FF]
elif (0x900 == (address & 0xF00)):
            # Selectable 256-byte RAM page, read at 0x1900-0x19FF.
byte = self.ram[(address & 0x7FF) | 0x400 | (self.ram_select << 8)]
elif ((address & 0xF00) >= 0xA00):
# Return fixed cartridge location.
byte = self.cartridge_banks[7][address & 0x7FF]
else:
print("Invalid address %x"%(address))
byte = 0
return byte
def _load_cartridge(self, filename):
bytes_read = 0
total_bytes_read = 0
self.max_cartridge = [[]] * self.max_banks
print("Opening: ", filename)
        self.ram = [0] * self.ram_size
with open(filename, 'rb') as rom_file:
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if bytes_read != 0:
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("MNetworkCartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class FECartridge(object):
def __init__(self, file_name, max_banks, bank_size):
self.max_banks = max_banks
self.bank_size = bank_size
self.cartridge_banks = [[]] * self.max_banks
self.num_banks = 0
self.current_bank = 0
        self._file_name = file_name
        self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.current_bank = state['current_bank']
self._file_name = state['file_name']
def get_absolute_address(self, address):
if 0x0000 == (address & 0x2000):
current_bank = 1
elif 0x2000 == (address & 0x2000):
current_bank = 0
return current_bank * self.bank_size + (address & 0xFFF)
def read(self, address):
if 0x0000 == (address & 0x2000):
self.current_bank = 1
elif 0x2000 == (address & 0x2000):
self.current_bank = 0
address = address & 0xFFF
return self.cartridge_banks[self.current_bank][address]
def write(self, address, data):
if 0x0000 == (address & 0x2000):
self.current_bank = 1
elif 0x2000 == (address & 0x2000):
self.current_bank = 0
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
self.max_cartridge = [[]] * self.max_banks
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
print("nb:%d,%x"%(self.num_banks, self.bank_size))
if bytes_read != 0:
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
        # Default to the first bank.
self.current_bank = 0
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class SingleBankCartridge(object):
""" Simple, single bank cartridge, no bank switching. """
def __init__(self, file_name, bank_size):
self.bank_size = bank_size
self.cartridge_bank = []
self.num_banks = 0
        self._file_name = file_name
        self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self._file_name = state['file_name']
def get_absolute_address(self, address):
return (address & 0xFFF)
def read(self, address):
return self.cartridge_bank[address & 0xFFF]
def write(self, address, data):
pass
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
self.max_cartridge = []
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
                if (bytes_read > 0) and (bytes_read < self.bank_size):
                    # If the bank is short, pad it with zeros.
                    bank += b'\000' * (self.bank_size - bytes_read)
                    # If the read size was less than a half bank, copy the
                    # shortfall from the first half.
                    if bytes_read <= self.bank_size // 2:
                        bank = bank[0:self.bank_size // 2] + bank[0:self.bank_size // 2]
                self.max_cartridge = bytearray(bank)
total_bytes_read += bytes_read
self.cartridge_bank = []
self.cartridge_bank = self.max_cartridge
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_bank))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class GenericCartridge(object):
def __init__(self, file_name, max_banks, bank_size, hot_swap, ram_size):
self.max_banks = max_banks
self.bank_size = bank_size
self.hot_swap = hot_swap
self.ram_size = ram_size
self.ram_addr_mask = 0xFFFF & (self.ram_size - 1)
self.cartridge_banks = [[]] * self.max_banks
self.ram = []
self.num_banks = 0
self.current_bank = 0
self.bank_select = 0
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['ram'] = list(self.ram)
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.ram = list(state['ram'])
self.current_bank = state['current_bank']
self._file_name = state['file_name']
def get_absolute_address(self, address):
return self.bank_size * self.current_bank + (address & 0xFFF)
def read(self, address):
address = address & 0xFFF
if (self.ram_size > 0) and (address < 2*self.ram_size) and (address >= self.ram_size):
data = self.ram[address & self.ram_addr_mask]
else:
# 0xFF8 == address: Last bank - 2
# 0xFF9 == address: Last bank - 1
# 0xFFA == address: Last bank
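            # Worked example (illustrative): with num_banks == 4 and
            # hot_swap == 0xFF9, the window is 0xFF6 <= address <= 0xFF9 and
            # current_bank = 4 - (0xFFA - address), i.e. 0xFF6 -> bank 0,
            # 0xFF7 -> bank 1, 0xFF8 -> bank 2, 0xFF9 -> bank 3.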
if (((self.hot_swap +1) - self.num_banks) <= address) and ((self.hot_swap+1) > address):
self.current_bank = self.num_banks - ((self.hot_swap+1) - address)
data = self.cartridge_banks[self.current_bank][address]
return data
def write(self, address, data):
address = address & 0xFFF
if (self.ram_size > 0) and (address < self.ram_size):
self.ram[address & self.ram_addr_mask] = data
if (((self.hot_swap+1) - self.num_banks) <= address) and ((self.hot_swap+1) > address):
self.current_bank = self.num_banks - ((self.hot_swap+1) - address)
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
if (self.ram_size > 0):
self.ram = [0] * self.ram_size
self.max_cartridge = [[]] * self.max_banks
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if (bytes_read > 0) and (bytes_read < self.bank_size):
# If the bank is short, pad it with zeros.
bank += bytearray('\000'.encode() * (self.bank_size-bytes_read))
# If the read size was less than a half bank, copy the
# shortfall.
if bytes_read <= int(self.bank_size/2):
bank = bank[0:int(self.bank_size/2)] + bank[0:int(self.bank_size/2)]
                self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
        # Default to the first bank.
self.current_bank = 0
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
if __name__ == '__main__':
import sys
new_generic_cart = GenericCartridge(sys.argv[1], 4, 0x1000, 0xFF9, 0x0)
print(new_generic_cart.read(0), new_generic_cart.read(1))
new_pb_cart = PBCartridge(sys.argv[1])
print(new_pb_cart.read(0), new_pb_cart.read(1))
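    # Minimal sketch (assumes the same ROM file passed above): exercise the
    # save/restore helpers so a bank switch can be undone. Only attributes
    # already defined by GenericCartridge are used here.
    saved = new_generic_cart.get_save_state()
    new_generic_cart.read(0xFF9)  # may switch to the last bank via hot swap
    new_generic_cart.set_save_state(saved)
    print("restored bank =", new_generic_cart.current_bank)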
|
|
from __future__ import division
import json
import os.path
from nflgame import OrderedDict
import nflgame.seq
import nflgame.statmap
_player_json_file = os.path.join(os.path.dirname(__file__), 'players.json')
def _create_players(jsonf=None):
"""
Creates a dict of Player objects from the players.json file, keyed
by GSIS ids.
"""
if jsonf is None:
jsonf = _player_json_file
    try:
        with open(jsonf) as f:
            data = json.loads(f.read())
    except IOError:
        return {}
players = {}
for playerid in data:
players[playerid] = Player(data[playerid])
return players
class Player (object):
"""
Player instances represent meta information about a single player.
This information includes name, team, position, status, height,
weight, college, jersey number, birth date, years, pro, etc.
Player information is populated from NFL.com profile pages.
"""
def __init__(self, data):
self.player_id = data['gsis_id']
self.gsis_name = data.get('gsis_name', '')
self.full_name = data.get('full_name', '')
self.first_name = data.get('first_name', '')
self.last_name = data.get('last_name', '')
self.team = data.get('team', '')
self.position = data.get('position', '')
self.profile_id = data.get('profile_id', 0)
self.profile_url = data.get('profile_url', '')
self.uniform_number = data.get('number', 0)
self.birthdate = data.get('birthdate', '')
self.college = data.get('college', '')
self.height = data.get('height', '')
self.weight = data.get('weight', '')
self.years_pro = data.get('years_pro', 0)
self.status = data.get('status', '')
# API backwards compatibility.
self.gsis_id = self.player_id
self.playerid = self.player_id
self.name = self.full_name
self.number = self.uniform_number
def stats(self, year, week=None):
games = nflgame.games(year, week)
players = list(nflgame.combine(games).filter(playerid=self.playerid))
if len(players) == 0:
return GamePlayerStats(self.player_id, self.gsis_name,
None, self.team)
return players[0]
def plays(self, year, week=None):
plays = []
games = nflgame.games(year, week)
for g in games:
plays += filter(lambda p: p.has_player(self.playerid),
list(g.drives.plays()))
return nflgame.seq.GenPlays(plays)
def __str__(self):
return '%s (%s, %s)' % (self.name, self.position, self.team)
class PlayerDefense (Player):
def __init__(self, team):
self.playerid = None
self.name = team
self.team = team
self.position = 'DEF'
def stats(self, year, week=None):
assert False, 'Cannot be called on a defense.'
def plays(self, year, week=None):
assert False, 'Cannot be called on a defense.'
def __str__(self):
return '%s Defense' % self.team
class PlayerStats (object):
"""
Player represents a single player and all of his statistical categories.
Every player has 'playerid', 'name' and 'home' fields.
Additionally, depending upon which statistical categories that player
was involved in for the game, he'll have properties such as 'passing_tds',
'rushing_yds', 'defense_int' and 'kicking_fgm'.
    In order to know whether a particular player belongs to a statistical category,
    you may use the filtering methods of a player sequence or alternatively,
use the has_cat method with arguments like 'passing', 'rushing', 'kicking',
etc. (A player sequence in this case would be an instance of
GenPlayerStats.)
You may also inspect whether a player has a certain property by using
the special __dict__ attribute. For example::
if 'passing_yds' in player.__dict__:
# Do something with player.passing_yds
"""
def __init__(self, playerid, name, home, team):
"""
Create a new Player instance with the player id (from NFL.com's
GameCenter), the player's name (e.g., "T.Brady") and whether the
player is playing in a home game or not.
"""
self.playerid = playerid
self.name = name
self.home = home
self.team = team
self._stats = OrderedDict()
self.player = None
if self.playerid in nflgame.players:
self.player = nflgame.players[self.playerid]
def has_cat(self, cat):
for f in self._stats:
if f.startswith(cat):
return True
return False
@property
def guess_position(self):
"""
Guesses the position of this player based on the statistical
categories present in this object when player meta is not
present.
Note that if this resorts to a guess, then it will be more
effective on aggregate data rather than data from just a
single play. (e.g., if a QB runs the ball, and that's the
only data available, the position returned will be RB.)
When a position is guessed, only the following positions will
be returned: QB, RB, WR, DEF, K and P.
"""
# Look for the player meta first. Duh.
if self.player is not None:
return self.player.position
stats = [
(self.passing_att, 'QB'),
(self.rushing_att, 'RB'),
(self.receiving_tar, 'WR'),
(self.defense_tkl, 'DEF'),
(self.defense_ast, 'DEF'),
(self.kicking_tot, 'K'),
(self.kicking_fga, 'K'),
(self.punting_tot, 'P'),
]
return sorted(stats, reverse=True)[0][1]
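        # Illustrative example: for aggregate stats like passing_att=30 and
        # rushing_att=3 (everything else 0), the list above sorts to
        # [(30, 'QB'), (3, 'RB'), ...], so 'QB' is returned.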
@property
def tds(self):
"""
Returns the total number of touchdowns credited to this player across
all statistical categories.
"""
n = 0
for f, v in self.__dict__.iteritems():
if f.endswith('tds'):
n += v
return n
@property
def twopta(self):
"""
Returns the total number of two point conversion attempts for
the passing, rushing and receiving categories.
"""
return (self.passing_twopta
+ self.rushing_twopta
+ self.receiving_twopta)
@property
def twoptm(self):
"""
Returns the total number of two point conversions for
the passing, rushing and receiving categories.
"""
return (self.passing_twoptm
+ self.rushing_twoptm
+ self.receiving_twoptm)
@property
def twoptmissed(self):
"""
Returns the total number of two point conversion failures for
the passing, rushing and receiving categories.
"""
return (self.passing_twoptmissed
+ self.rushing_twoptmissed
+ self.receiving_twoptmissed)
@property
def stats(self):
"""
Returns a dict of all stats for the player.
"""
return self._stats
def formatted_stats(self):
"""
Returns a roughly-formatted string of all statistics for this player.
"""
s = []
for stat, val in self._stats.iteritems():
s.append('%s: %s' % (stat, val))
return ', '.join(s)
def _add_stats(self, stats):
for k, v in stats.iteritems():
self.__dict__[k] = self.__dict__.get(k, 0) + v
self._stats[k] = self.__dict__[k]
def _overwrite_stats(self, stats):
for k, v in stats.iteritems():
self.__dict__[k] = v
self._stats[k] = self.__dict__[k]
def __str__(self):
"""
Simply returns the player's name, e.g., "T.Brady".
"""
return self.name
def __add__(self, other):
"""
Adds two players together. Only two player objects that correspond
to the same human (i.e., GameCenter identifier) can be added together.
If two different players are added together, an assertion will
be raised.
The effect of adding two player objects simply corresponds to the
sums of all statistical values.
Note that as soon as two players have been added, the 'home' property
becomes undefined if the two operands have different values of 'home'.
"""
assert self.playerid == other.playerid
assert type(self) == type(other)
if self.home != other.home:
home = None
else:
home = self.home
new_player = self.__class__(self.playerid, self.name, home, self.team)
new_player._add_stats(self._stats)
new_player._add_stats(other._stats)
return new_player
def __sub__(self, other):
assert self.playerid == other.playerid
assert type(self) == type(other)
new_player = GamePlayerStats(self.playerid,
self.name, self.home, self.team)
new_player._add_stats(self._stats)
for bk, bv in other._stats.iteritems():
if bk not in new_player._stats: # stat was taken away? ignore.
continue
new_player._stats[bk] -= bv
if new_player._stats[bk] == 0:
del new_player._stats[bk]
else:
new_player.__dict__[bk] = new_player._stats[bk]
anydiffs = False
for k, v in new_player._stats.iteritems():
if v > 0:
anydiffs = True
break
if not anydiffs:
return None
return new_player
def __getattr__(self, name):
# If name has one of the categories as a prefix, then return
# a default value of zero
for cat in nflgame.statmap.categories:
if name.startswith(cat):
return 0
raise AttributeError
def passer_rating(self):
"""
Calculate and return the passer rating using the NFL formula. Passer
rating is calculated using a player's passing attempts, completions,
yards, touchdowns, and interceptions. Passer rating in the NFL is on a
scale from 0 to 158.3.
"""
l = [((self.passing_cmp / self.passing_att) - .3) * 5]
l.append(((self.passing_yds / self.passing_att) - 3) * .25)
l.append((self.tds / self.passing_att) * 20)
l.append(2.375 - (self.passing_ints / self.passing_att * 25))
m = []
for a in l:
if a < 0:
a = 0
m.append(a)
elif a > 2.375:
a = 2.375
m.append(a)
else:
m.append(a)
rating = round((sum(m) / 6) * 100, 1)
return rating
class GamePlayerStats (PlayerStats):
def __init__(self, playerid, name, home, team):
super(GamePlayerStats, self).__init__(playerid, name, home, team)
self.games = 1
def __add__(self, other):
new_player = super(GamePlayerStats, self).__add__(other)
new_player.games = self.games + other.games
return new_player
class PlayPlayerStats (PlayerStats):
pass
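if __name__ == '__main__':
    # Minimal worked example of the passer rating formula used above, with
    # hypothetical stat values (20 completions, 30 attempts, 250 yards,
    # 2 touchdowns, 1 interception). Each component is clamped to the
    # 0-2.375 range before averaging, mirroring passer_rating().
    cmp_, att, yds, tds, ints = 20.0, 30.0, 250.0, 2.0, 1.0
    components = [((cmp_ / att) - .3) * 5,
                  ((yds / att) - 3) * .25,
                  (tds / att) * 20,
                  2.375 - (ints / att * 25)]
    clamped = [min(max(c, 0), 2.375) for c in components]
    print(round((sum(clamped) / 6) * 100, 1))  # ~100.7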
|
|
"""Models for SQLAlchemy.
This file contains the model definitions for schema version 16,
used by Home Assistant Core 2021.6.0, which was the initial version
to include long term statistics.
It is used to test the schema migration logic.
"""
import json
import logging
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Identity,
Index,
Integer,
String,
Text,
distinct,
)
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import Session
from homeassistant.const import (
MAX_LENGTH_EVENT_CONTEXT_ID,
MAX_LENGTH_EVENT_EVENT_TYPE,
MAX_LENGTH_EVENT_ORIGIN,
MAX_LENGTH_STATE_DOMAIN,
MAX_LENGTH_STATE_ENTITY_ID,
MAX_LENGTH_STATE_STATE,
)
from homeassistant.core import Context, Event, EventOrigin, State, split_entity_id
from homeassistant.helpers.json import JSONEncoder
import homeassistant.util.dt as dt_util
# SQLAlchemy Schema
# pylint: disable=invalid-name
Base = declarative_base()
SCHEMA_VERSION = 16
_LOGGER = logging.getLogger(__name__)
DB_TIMEZONE = "+00:00"
TABLE_EVENTS = "events"
TABLE_STATES = "states"
TABLE_RECORDER_RUNS = "recorder_runs"
TABLE_SCHEMA_CHANGES = "schema_changes"
TABLE_STATISTICS = "statistics"
ALL_TABLES = [
TABLE_STATES,
TABLE_EVENTS,
TABLE_RECORDER_RUNS,
TABLE_SCHEMA_CHANGES,
TABLE_STATISTICS,
]
DATETIME_TYPE = DateTime(timezone=True).with_variant(
mysql.DATETIME(timezone=True, fsp=6), "mysql"
)
class Events(Base): # type: ignore
"""Event history data."""
__table_args__ = {
"mysql_default_charset": "utf8mb4",
"mysql_collate": "utf8mb4_unicode_ci",
}
__tablename__ = TABLE_EVENTS
event_id = Column(Integer, Identity(), primary_key=True)
event_type = Column(String(MAX_LENGTH_EVENT_EVENT_TYPE))
event_data = Column(Text().with_variant(mysql.LONGTEXT, "mysql"))
origin = Column(String(MAX_LENGTH_EVENT_ORIGIN))
time_fired = Column(DATETIME_TYPE, index=True)
created = Column(DATETIME_TYPE, default=dt_util.utcnow)
context_id = Column(String(MAX_LENGTH_EVENT_CONTEXT_ID), index=True)
context_user_id = Column(String(MAX_LENGTH_EVENT_CONTEXT_ID), index=True)
context_parent_id = Column(String(MAX_LENGTH_EVENT_CONTEXT_ID), index=True)
__table_args__ = (
# Used for fetching events at a specific time
# see logbook
Index("ix_events_event_type_time_fired", "event_type", "time_fired"),
)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.Events("
f"id={self.event_id}, type='{self.event_type}', data='{self.event_data}', "
f"origin='{self.origin}', time_fired='{self.time_fired}'"
f")>"
)
@staticmethod
def from_event(event, event_data=None):
"""Create an event database object from a native event."""
return Events(
event_type=event.event_type,
event_data=event_data or json.dumps(event.data, cls=JSONEncoder),
origin=str(event.origin.value),
time_fired=event.time_fired,
context_id=event.context.id,
context_user_id=event.context.user_id,
context_parent_id=event.context.parent_id,
)
def to_native(self, validate_entity_id=True):
"""Convert to a natve HA Event."""
context = Context(
id=self.context_id,
user_id=self.context_user_id,
parent_id=self.context_parent_id,
)
try:
return Event(
self.event_type,
json.loads(self.event_data),
EventOrigin(self.origin),
process_timestamp(self.time_fired),
context=context,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting to event: %s", self)
return None
class States(Base): # type: ignore
"""State change history."""
__table_args__ = {
"mysql_default_charset": "utf8mb4",
"mysql_collate": "utf8mb4_unicode_ci",
}
__tablename__ = TABLE_STATES
state_id = Column(Integer, Identity(), primary_key=True)
domain = Column(String(MAX_LENGTH_STATE_DOMAIN))
entity_id = Column(String(MAX_LENGTH_STATE_ENTITY_ID))
state = Column(String(MAX_LENGTH_STATE_STATE))
attributes = Column(Text().with_variant(mysql.LONGTEXT, "mysql"))
event_id = Column(
Integer, ForeignKey("events.event_id", ondelete="CASCADE"), index=True
)
last_changed = Column(DATETIME_TYPE, default=dt_util.utcnow)
last_updated = Column(DATETIME_TYPE, default=dt_util.utcnow, index=True)
created = Column(DATETIME_TYPE, default=dt_util.utcnow)
old_state_id = Column(Integer, ForeignKey("states.state_id"), index=True)
event = relationship("Events", uselist=False)
old_state = relationship("States", remote_side=[state_id])
__table_args__ = (
# Used for fetching the state of entities at a specific time
# (get_states in history.py)
Index("ix_states_entity_id_last_updated", "entity_id", "last_updated"),
)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.States("
f"id={self.state_id}, domain='{self.domain}', entity_id='{self.entity_id}', "
f"state='{self.state}', event_id='{self.event_id}', "
f"last_updated='{self.last_updated.isoformat(sep=' ', timespec='seconds')}', "
f"old_state_id={self.old_state_id}"
f")>"
)
@staticmethod
def from_event(event):
"""Create object from a state_changed event."""
entity_id = event.data["entity_id"]
state = event.data.get("new_state")
dbstate = States(entity_id=entity_id)
# State got deleted
if state is None:
dbstate.state = ""
dbstate.domain = split_entity_id(entity_id)[0]
dbstate.attributes = "{}"
dbstate.last_changed = event.time_fired
dbstate.last_updated = event.time_fired
else:
dbstate.domain = state.domain
dbstate.state = state.state
dbstate.attributes = json.dumps(dict(state.attributes), cls=JSONEncoder)
dbstate.last_changed = state.last_changed
dbstate.last_updated = state.last_updated
return dbstate
def to_native(self, validate_entity_id=True):
"""Convert to an HA state object."""
try:
return State(
self.entity_id,
self.state,
json.loads(self.attributes),
process_timestamp(self.last_changed),
process_timestamp(self.last_updated),
# Join the events table on event_id to get the context instead
# as it will always be there for state_changed events
context=Context(id=None),
validate_entity_id=validate_entity_id,
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self)
return None
class Statistics(Base): # type: ignore
"""Statistics."""
__table_args__ = {
"mysql_default_charset": "utf8mb4",
"mysql_collate": "utf8mb4_unicode_ci",
}
__tablename__ = TABLE_STATISTICS
id = Column(Integer, primary_key=True)
created = Column(DATETIME_TYPE, default=dt_util.utcnow)
source = Column(String(32))
statistic_id = Column(String(255))
start = Column(DATETIME_TYPE, index=True)
mean = Column(Float())
min = Column(Float())
max = Column(Float())
last_reset = Column(DATETIME_TYPE)
state = Column(Float())
sum = Column(Float())
__table_args__ = (
# Used for fetching statistics for a certain entity at a specific time
Index("ix_statistics_statistic_id_start", "statistic_id", "start"),
)
@staticmethod
def from_stats(source, statistic_id, start, stats):
"""Create object from a statistics."""
return Statistics(
source=source,
statistic_id=statistic_id,
start=start,
**stats,
)
class RecorderRuns(Base): # type: ignore
"""Representation of recorder run."""
__tablename__ = TABLE_RECORDER_RUNS
run_id = Column(Integer, Identity(), primary_key=True)
start = Column(DateTime(timezone=True), default=dt_util.utcnow)
end = Column(DateTime(timezone=True))
closed_incorrect = Column(Boolean, default=False)
created = Column(DateTime(timezone=True), default=dt_util.utcnow)
__table_args__ = (Index("ix_recorder_runs_start_end", "start", "end"),)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
end = (
f"'{self.end.isoformat(sep=' ', timespec='seconds')}'" if self.end else None
)
return (
f"<recorder.RecorderRuns("
f"id={self.run_id}, start='{self.start.isoformat(sep=' ', timespec='seconds')}', "
f"end={end}, closed_incorrect={self.closed_incorrect}, "
f"created='{self.created.isoformat(sep=' ', timespec='seconds')}'"
f")>"
)
def entity_ids(self, point_in_time=None):
"""Return the entity ids that existed in this run.
Specify point_in_time if you want to know which existed at that point
in time inside the run.
"""
session = Session.object_session(self)
assert session is not None, "RecorderRuns need to be persisted"
query = session.query(distinct(States.entity_id)).filter(
States.last_updated >= self.start
)
if point_in_time is not None:
query = query.filter(States.last_updated < point_in_time)
elif self.end is not None:
query = query.filter(States.last_updated < self.end)
return [row[0] for row in query]
def to_native(self, validate_entity_id=True):
"""Return self, native format is this model."""
return self
class SchemaChanges(Base): # type: ignore
"""Representation of schema version changes."""
__tablename__ = TABLE_SCHEMA_CHANGES
change_id = Column(Integer, Identity(), primary_key=True)
schema_version = Column(Integer)
changed = Column(DateTime(timezone=True), default=dt_util.utcnow)
def __repr__(self) -> str:
"""Return string representation of instance for debugging."""
return (
f"<recorder.SchemaChanges("
f"id={self.change_id}, schema_version={self.schema_version}, "
f"changed='{self.changed.isoformat(sep=' ', timespec='seconds')}'"
f")>"
)
def process_timestamp(ts):
"""Process a timestamp into datetime object."""
if ts is None:
return None
if ts.tzinfo is None:
return ts.replace(tzinfo=dt_util.UTC)
return dt_util.as_utc(ts)
def process_timestamp_to_utc_isoformat(ts):
"""Process a timestamp into UTC isotime."""
if ts is None:
return None
if ts.tzinfo == dt_util.UTC:
return ts.isoformat()
if ts.tzinfo is None:
return f"{ts.isoformat()}{DB_TIMEZONE}"
return ts.astimezone(dt_util.UTC).isoformat()
class LazyState(State):
"""A lazy version of core State."""
__slots__ = [
"_row",
"entity_id",
"state",
"_attributes",
"_last_changed",
"_last_updated",
"_context",
]
def __init__(self, row): # pylint: disable=super-init-not-called
"""Init the lazy state."""
self._row = row
self.entity_id = self._row.entity_id
self.state = self._row.state or ""
self._attributes = None
self._last_changed = None
self._last_updated = None
self._context = None
@property # type: ignore
def attributes(self):
"""State attributes."""
if not self._attributes:
try:
self._attributes = json.loads(self._row.attributes)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self._row)
self._attributes = {}
return self._attributes
@attributes.setter
def attributes(self, value):
"""Set attributes."""
self._attributes = value
@property # type: ignore
def context(self):
"""State context."""
if not self._context:
self._context = Context(id=None)
return self._context
@context.setter
def context(self, value):
"""Set context."""
self._context = value
@property # type: ignore
def last_changed(self):
"""Last changed datetime."""
if not self._last_changed:
self._last_changed = process_timestamp(self._row.last_changed)
return self._last_changed
@last_changed.setter
def last_changed(self, value):
"""Set last changed datetime."""
self._last_changed = value
@property # type: ignore
def last_updated(self):
"""Last updated datetime."""
if not self._last_updated:
self._last_updated = process_timestamp(self._row.last_updated)
return self._last_updated
@last_updated.setter
def last_updated(self, value):
"""Set last updated datetime."""
self._last_updated = value
def as_dict(self):
"""Return a dict representation of the LazyState.
Async friendly.
To be used for JSON serialization.
"""
if self._last_changed:
last_changed_isoformat = self._last_changed.isoformat()
else:
last_changed_isoformat = process_timestamp_to_utc_isoformat(
self._row.last_changed
)
if self._last_updated:
last_updated_isoformat = self._last_updated.isoformat()
else:
last_updated_isoformat = process_timestamp_to_utc_isoformat(
self._row.last_updated
)
return {
"entity_id": self.entity_id,
"state": self.state,
"attributes": self._attributes or self.attributes,
"last_changed": last_changed_isoformat,
"last_updated": last_updated_isoformat,
}
def __eq__(self, other):
"""Return the comparison."""
return (
other.__class__ in [self.__class__, State]
and self.entity_id == other.entity_id
and self.state == other.state
and self.attributes == other.attributes
)
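if __name__ == "__main__":
    # Minimal sketch of the timestamp helpers above (not part of the schema
    # itself): a naive datetime is assumed to be UTC, an aware one is
    # converted to UTC before serialization.
    import datetime as _datetime

    naive = _datetime.datetime(2021, 6, 1, 12, 0, 0)
    print(process_timestamp(naive))                   # 2021-06-01 12:00:00+00:00
    print(process_timestamp_to_utc_isoformat(naive))  # 2021-06-01T12:00:00+00:00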
|
|
"""
.. module:: gibson
:synopsis: A high-performance XML difference engine for comparing XML
pairs, and extracting data from pairs whose text elements are
formatted in a specific "mo money" manner.
.. moduleauthor:: Emily Langer <emily.langer@expeditors.com>
"""
import re
import logging
from dateutil import parser
import datetime
logger = logging.getLogger('PynetServer')
def diff_xml(expected_xml, live_xml, validate_strict_order, xpaths_to_ignore=None):
"""
Diffs two ElementTree Elements.
Takes two Element instances, transforms them into two lists, one for
expected, one for live data. Each of those contains lists of tuples
representing the elements contained in the respective XML trees.
These lists are diffed, and variables in $${} format are extracted.
xpaths_to_ignore takes a list of strings which is passed to
get_comparison_objects. Elements that match are ignored and not validated.
:param expected_xml: XML containing the expected data.
:type expected_xml: xml.etree.ElementTree.Element
:param live_xml: XML containing the live response data.
:type live_xml: xml.etree.ElementTree.Element
    :param validate_strict_order: If True, elements are compared positionally;
        otherwise they are compared as an unordered collection.
    :type validate_strict_order: bool
    :param xpaths_to_ignore: List of XPath strings which should be used
        to filter elements that should not be diffed.
    :type xpaths_to_ignore: None, list
    :returns: Tuple of (parsed variables dict, non-matching elements from
        expected_xml, non-matching elements from live_xml)
    :rtype: tuple
"""
expected_xml_objects = get_comparison_objects(expected_xml, xpaths_to_ignore)
live_xml_objects = get_comparison_objects(live_xml, xpaths_to_ignore)
expected_xml_objects, live_xml_objects = compare_lists_strict(expected_xml_objects, live_xml_objects,
validate_strict_order)
mo_money_dict, expected_xml_objects, live_xml_objects = get_mo_money(expected_xml_objects, live_xml_objects)
return mo_money_dict, expected_xml_objects, live_xml_objects
def get_comparison_objects(xml_element, xpaths_to_ignore=None):
"""
Builds list-of-tuples representations of each element and its ancestors.
Python's xml.etree.ElementTree module does not offer Element objects
whose contents can be directly compared. To address this issue in an
efficient way, we expand Elements into representations comprised of
Python built-ins, which can be compared very quickly and cheaply.
Each childless Element is represented by a (tag, attrib, text) tuple,
and each of its ancestors is represented only by (tag, attrib), as
the text attribute of those Elements is typically whitespace, which
we want to ignore for our purposes.
xpaths_to_ignore takes a list of strings to use as args for
Element.findall(). Elements that match are discarded (not returned)
as they are not to be validated.
:param xml_element: The XML object to be broken into a list of tuples.
:type xml_element: xml.etree.ElementTree.Element
:param xpaths_to_ignore: List of XPath strings which should be used
to filter elements in xml_element.
:type xpaths_to_ignore: None, list
:returns: List of tuples that represent xml_element and its children.
:rtype: list
"""
# Build dict of {child: parent} relationships
parent_map = {child: parent for parent in xml_element.iter() for child in parent}
# This is where all of our childless nodes go for later comparison.
compare_list = []
# Generate a set of all elements which match the xpaths_to_ignore.
ignore_set = set()
if xpaths_to_ignore:
for xpath in xpaths_to_ignore:
ignore_set.update(xml_element.findall(xpath))
# Handling the root by itself lets us bypass the check for None after .get()ting
# from the ancestor_map in the loop below. Root has no parent_info.
ancestor_map = {}
if xml_element not in ignore_set:
ancestor_map[xml_element] = [(xml_element.tag, xml_element.attrib)]
# If there are no children, we want to compare everything.
if not list(xml_element):
compare_list.append([(xml_element.tag, xml_element.attrib, xml_element.text)])
xml_element_iter = xml_element.iter()
next(xml_element_iter) # Skip the root, because we've already handled it.
for elem in xml_element_iter:
parent = parent_map.get(elem)
parent_info = ancestor_map.get(parent)
# No parent info means the parent was ignored, and elem should be too.
# TODO: This implementation ignores cases where an element whose contents
        # we don't care about goes missing entirely, which might not be what
        # we want. We might want to make sure the element is at least present.
# This complicates the ignore process, but is possible. Evaluate how
# important that might be and implement if necessary.
if elem in ignore_set or parent_info is None:
continue
if list(elem): # See if the elem has children
# If so, don't track the element's text. 99% of the time, it's just
# the whitespace between elements, which we don't want to validate.
elem_info = [(elem.tag, elem.attrib)]
elem_info.extend(parent_info)
ancestor_map[elem] = elem_info
else:
# If there are no children, track everything
elem_info = [(elem.tag, elem.attrib, elem.text)]
elem_info.extend(parent_info)
compare_list.append(elem_info)
# Reorders things to be more XPath-like, like a person would expect.
# We should probably do this at the end, when there isn't much left to reverse.
for item in compare_list:
item.reverse()
return compare_list
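# Illustrative example: for an element parsed from '<a><b x="1">hi</b></a>',
# get_comparison_objects returns [[('a', {}), ('b', {'x': '1'}, 'hi')]] --
# one list per childless node, ordered root-first like an XPath.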
def compare_lists_strict(list_a, list_b, validate_strict_order=False):
"""
Compares two lists, keeping both ordered and accounting for duplicates.
Compare two lists without using a shortcut like set(list_a) - set(list_b)
or a simple list comprehension because those don't account for the
possibility that there may be perfect duplicates of list members, and we
need to make sure the correct number of each are present.
:param list_a: First list to compare
:type list_a: list
    :param list_b: Second list to compare
    :type list_b: list
    :param validate_strict_order: If True, compare the lists positionally and
        treat everything from the first mismatch onwards as a difference.
    :type validate_strict_order: bool
:returns: Tuple containing the elements of the respective lists which were
not found in the opposing list.
:rtype: tuple
"""
# It's really cheap to do this quick check first,
# and is lightning fast if it works.
    if list_a == list_b:
        return [], []
    else:
        if validate_strict_order is True:
            # Walk both lists in lockstep; everything from the first mismatch
            # (or from the end of the shorter list) onwards is left over.
            mismatch_at = min(len(list_a), len(list_b))
            for i in range(mismatch_at):
                if list_a[i] != list_b[i]:
                    mismatch_at = i
                    break
            list_a_leftovers = list_a[mismatch_at:]
            list_b = list_b[mismatch_at:]
        else:
            # Can't modify the list we're iterating over, and
            # this is the fastest way to shallow copy a list
            list_a_leftovers = list_a[:]
            for a_obj in list_a:
                if a_obj in list_b:
                    # We can't modify the list we iterate over, so we modify the copy.
                    list_a_leftovers.remove(a_obj)
                    # But we can modify this one. Makes our final list at the
                    # same time it shrinks the comparison pool!
                    list_b.remove(a_obj)
        return list_a_leftovers, list_b
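# Illustrative example (unordered mode):
#   compare_lists_strict([1, 1, 2], [1, 2, 3]) -> ([1], [3])
# One of the duplicate 1s in list_a has no partner in list_b, and 3 appears
# only in list_b.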
def get_mo_money(expected_xml_extras, live_xml_extras):
"""
Parses out "mo money" variables and removes "matched" diff remainders.
Checks two lists of objects returned from get_comparison_objects for
"mo money" variables (in the $${variable} format). If an expected XML
element's text contains a mo money variable, get_mo_money tries to find
its corresponding element in the live XML.
If successful, the value from the live element is saved to a dictionary
with the variable name as its key. The two elements are now considered
matched, and are thus removed from their respective lists of differences.
:param expected_xml_extras: The list of remaining element representations
in the expected XML that were left after comparing to the live XML.
:type expected_xml_extras: list
:param live_xml_extras: The list of remaining element representations
in the live XML that were left after comparing to the expected XML.
:type live_xml_extras: list
:returns: (Parsed variables dict, non-matching elements from expected_xml,
non-matching elements from live_xml)
:raises: ValueError if a mo money variable is specified in the
expected XML, but its match cannot be found in the live XML.
:rtype: tuple
"""
mo_money = {}
# Can't modify the list we're iterating over, and
# this is the fastest way to shallow copy a list
leftover_expected_extras = expected_xml_extras[:]
for expected_extra in expected_xml_extras:
# Go to the last element (the childless node) and
# expand the (tag, attribute, text) tuple.
expected_tag, expected_attrib, expected_text = expected_extra[-1]
if expected_text is None:
expected_text = '' # Element.text returns None, not '', if there's no text.
if expected_text.find('$${') == -1:
continue # Didn't find the opening bracket. Move along.
        match = re.search(r'\$\$\{\s*([\w_]+)\s*\}', expected_text)
if not match:
continue # False positive during find. There's no closing bracket.
key = match.group(1)
# Compare the ancestry by slicing the childless element off the end.
live_matches = [l for l in live_xml_extras if l[:-1] == expected_extra[:-1]]
found_match = False # Using a flag instead of a for/else for you old-timey Java folks... :)
for live_match in live_matches:
live_tag, live_attrib, live_text = live_match[-1] # Expand just the childless element part.
if live_tag == expected_tag and live_attrib == expected_attrib:
mo_money[key] = live_text
leftover_expected_extras.remove(expected_extra) # These are now "matches".
live_xml_extras.remove(live_match)
found_match = True
break
if found_match is False:
            raise ValueError('Could not find expected $${} match in live response! Missing variable: %s' % key)
return mo_money, leftover_expected_extras, live_xml_extras
def pretty_print_differences(expected_xml_leftovers, live_xml_leftovers, verbose=False, sort_result=True):
"""
Pretty prints differences output by diff_xml().
Takes diff lists output by diff_xml() and generates a list of strings
for logging purposes. Strings are sorted such that related mismatches are
usually printed next to each other, and lined up with each other in order
to make the differences easier for a human to spot.
:param expected_xml_leftovers: The list of remaining element representations
in the expected XML that were left after comparing to the live XML.
:type expected_xml_leftovers: list
:param live_xml_leftovers: The list of remaining element representations
in the live XML that were left after comparing to the expected XML.
:type live_xml_leftovers: list
:param verbose: If True, element attributes for ancestor elements are also
printed. If False, only tags are returned for ancestors.
:type verbose: bool
:returns: List of strings, each representing one of the input mismatches.
:rtype: list
"""
# Define the transform function once at the top, so we don't have to check
# verbose each time we go to build a string. Speed, speed, speed.
if verbose is True:
elem_tuple_to_str = lambda x: ''.join([x[0], str(x[1])])
else:
elem_tuple_to_str = lambda x: x[0]
# Setting this up lets us de-dupe the loop below.
work_dict = {'Expected: /': expected_xml_leftovers,
'Live : /': live_xml_leftovers}
    # Turning the below into a list comprehension is faster... by about 2%.
# But it's 200% harder to understand.
combined_leftovers = []
for prefix, leftover_list in work_dict.items():
for leftover in leftover_list:
# We keep the string in a few different pieces to allow us to
# sort/group similar elements together, i.e. put expected values
# next to their likely live counterparts.
if leftover[-1][2] is None: # Another check for None. We don't just str() it, cause we don't want
elem_text = '' # the printed string to look like '[None]'.
else:
elem_text = leftover[-1][2]
string_parts = [prefix,
'/'.join([elem_tuple_to_str(e) for e in leftover]),
''.join(['[', elem_text, ']'])]
combined_leftovers.append(string_parts)
# Sort the parts, then join them into their final strings. Sort first
# by ancestry, then put Expected above Live for each ancestry group.
if sort_result is True:
result_strings = [''.join(leftover) for leftover in
sorted(combined_leftovers, key=lambda x: (x[1], x[0]))]
else:
result_strings = [''.join(leftover) for leftover in sorted(combined_leftovers, key=lambda x: x[0])]
return result_strings
def get_num_date_ignore_xpaths(xpaths_range_for_num_date, test_xml, live_xml, xpaths_ignore_num_date):
"""Finds test case level xpaths that are date/number ranges, grabs the range and uses the number to validate
whether the expected response is in acceptable range of the live response. if so, adds that xpath to xpaths
to ignore.
:param xpaths_range_for_num_date: XPath to validate on, along with the number range that considers the
live value acceptable.
:type xpaths_range_for_num_date: list
:param test_xml: XML containing expected data.
:type test_xml: xml.etree.ElementTree.Element
:param live_xml: XML containing the live response data.
:type live_xml: xml.etree.ElementTree.Element
    :param xpaths_ignore_num_date: Running list of XPaths to ignore; qualifying XPaths are appended to it.
:type xpaths_ignore_num_date: list
:return: List of qualified XPaths to ignore because the live data fit within the number range provided by
xpaths_range_for_num_date
"""
for m in xpaths_range_for_num_date:
xpath, acceptable_range = m.split(',', 1)
number_or_date_tovalidate = test_xml.findtext(xpath)
if not acceptable_range or not number_or_date_tovalidate:
raise ValueError('Element or value missing from "testXPathAndRangeForNum" element group')
if number_or_date_tovalidate.isdigit():
upper_bound, lower_bound, num_live_int = num_compare(acceptable_range, number_or_date_tovalidate,
live_xml, xpath)
if lower_bound <= num_live_int <= upper_bound:
xpaths_ignore_num_date += [xpath]
else:
upper_bound, lower_bound, real_date = date_compare(acceptable_range, number_or_date_tovalidate,
live_xml, xpath)
try:
if real_date and lower_bound <= real_date <= upper_bound:
xpaths_ignore_num_date += [xpath]
except TypeError:
logger.error("Error: type is not same for expected time and real time")
return xpaths_ignore_num_date
def num_compare(range_no, num_date, live_response_xml, xpath):
    range_1 = re.findall(r'\d+', range_no)[0]
big = int(num_date) + int(range_1)
small = int(num_date) - int(range_1)
num_live = live_response_xml.findtext(xpath)
num_live_int = int(num_live)
return big, small, num_live_int
def date_compare(range_no, num_date, live_response_xml, xpath):
range_all = range_no.strip()[5:]
range_day, range_1 = range_all.split('T')
hour2, minute2, second2_1 = range_1.split(':')
day = range_day
second2 = float(second2_1)
expected_datetime = parser.parse(num_date)
# print(expected_datetime)
bigger = expected_datetime + datetime.timedelta(int(day), int(second2), 0, 0, int(minute2), int(hour2), 0)
smaller = expected_datetime - datetime.timedelta(int(day), int(second2), 0, 0, int(minute2), int(hour2), 0)
time_live = live_response_xml.findtext(xpath)
real_dat1 = ''
if time_live:
real_dat1 = parser.parse(time_live)
return bigger, smaller, real_dat1
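if __name__ == '__main__':
    # Minimal sketch of the diff/extract flow, using a hypothetical pair of
    # documents. The $${order_id} placeholder in the expected XML is filled
    # from the matching element in the live XML.
    import xml.etree.ElementTree as ET

    expected = ET.fromstring('<order><id>$${order_id}</id></order>')
    live = ET.fromstring('<order><id>12345</id></order>')
    variables, expected_left, live_left = diff_xml(expected, live, False)
    print(variables)       # {'order_id': '12345'}
    print(expected_left)   # []
    print(live_left)       # []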
|
|
# Copyright 2019 Microsoft Corporation
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import datetime
import logging
from abc import abstractmethod
from email.utils import parseaddr
import jmespath
from c7n_azure import constants
from c7n_azure.actions.base import AzureBaseAction, AzureEventAction
from c7n_azure.tags import TagHelper
from c7n_azure.utils import StringUtils
from dateutil import tz as tzutils
from msrest import Deserializer
from c7n import utils
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterValidationError
from c7n.filters.offhours import Time
from c7n.utils import type_schema
from c7n.lookup import Lookup
class Tag(AzureBaseAction):
"""Adds tags to Azure resources
:example:
This policy will tag all existing resource groups with a value such as Environment
.. code-block:: yaml
policies:
- name: azure-tag-resourcegroups
resource: azure.resourcegroup
description: |
Tag all existing resource groups with a value such as Environment
actions:
- type: tag
tag: Environment
value: Test
"""
schema = utils.type_schema(
'tag',
**{
'value': Lookup.lookup_type({'type': 'string'}),
'tag': Lookup.lookup_type({'type': 'string'}),
'tags': {'type': 'object'}
}
)
schema_alias = True
log = logging.getLogger('custodian.azure.tagging.Tag')
def __init__(self, data=None, manager=None, log_dir=None):
super(Tag, self).__init__(data, manager, log_dir)
def validate(self):
if not self.data.get('tags') and not (self.data.get('tag') and self.data.get('value')):
raise FilterValidationError(
"Must specify either tags or a tag and value")
if self.data.get('tags') and self.data.get('tag'):
raise FilterValidationError(
"Can't specify both tags and tag, choose one")
return self
def _process_resource(self, resource):
new_tags = self._get_tags(resource)
TagHelper.add_tags(self, resource, new_tags)
def _get_tags(self, resource):
return self.data.get('tags') or {Lookup.extract(
self.data.get('tag'), resource): Lookup.extract(self.data.get('value'), resource)}
class RemoveTag(AzureBaseAction):
"""Removes tags from Azure resources
:example:
This policy will remove tag for all existing resource groups with a key such as Environment
.. code-block:: yaml
policies:
- name: azure-remove-tag-resourcegroups
resource: azure.resourcegroup
description: |
Remove tag for all existing resource groups with a key such as Environment
actions:
- type: untag
tags: ['Environment']
"""
schema = utils.type_schema(
'untag',
tags={'type': 'array', 'items': {'type': 'string'}})
schema_alias = True
def __init__(self, data=None, manager=None, log_dir=None):
super(RemoveTag, self).__init__(data, manager, log_dir)
def validate(self):
if not self.data.get('tags'):
raise FilterValidationError("Must specify tags")
return self
def _prepare_processing(self,):
self.tags_to_delete = self.data.get('tags')
def _process_resource(self, resource):
TagHelper.remove_tags(self, resource, self.tags_to_delete)
class AutoTagBase(AzureEventAction):
default_value = "Unknown"
query_select = "eventTimestamp, operationName"
max_query_days = 90
schema = utils.type_schema(
'auto-tag-base',
required=['tag'],
**{'update': {'type': 'boolean'},
'tag': {'type': 'string'},
'days': {'type': 'integer'}})
schema_alias = True
def __init__(self, data=None, manager=None, log_dir=None):
super(AutoTagBase, self).__init__(data, manager, log_dir)
@abstractmethod
def _get_tag_value_from_event(self, event):
raise NotImplementedError()
@abstractmethod
def _get_tag_value_from_resource(self, resource):
raise NotImplementedError()
def validate(self):
if self.manager.data.get('mode', {}).get('type') == 'azure-event-grid' \
and self.data.get('days') is not None:
raise PolicyValidationError(
"Auto tag actions in event mode does not use days.")
if (self.data.get('days') is not None and
(self.data.get('days') < 1 or self.data.get('days') > 90)):
raise FilterValidationError("Days must be between 1 and 90")
return self
def _prepare_processing(self):
self.session = self.manager.get_session()
self.client = self.manager.get_client('azure.mgmt.monitor.MonitorManagementClient')
self.tag_key = self.data['tag']
self.should_update = self.data.get('update', False)
def _process_resource(self, resource, event):
# if the auto-tag-user policy set update to False (or it's unset) then we
# will skip writing their UserName tag and not overwrite pre-existing values
if not self.should_update and resource.get('tags', {}).get(self.tag_key, None):
return
tag_value = self.default_value
if event:
tag_value = self._get_tag_value_from_event(event) or tag_value
else:
tag_value = self._get_tag_value_from_resource(resource) or tag_value
TagHelper.add_tags(self, resource, {self.tag_key: tag_value})
def _get_first_event(self, resource):
if 'c7n:first_iam_event' in resource:
return resource['c7n:first_iam_event']
# Makes patching this easier
from c7n_azure.utils import utcnow
# Calculate start time
delta_days = self.data.get('days', self.max_query_days)
start_time = utcnow() - datetime.timedelta(days=delta_days)
# resource group type
if self.manager.type == 'resourcegroup':
resource_type = "Microsoft.Resources/subscriptions/resourcegroups"
query_filter = " and ".join([
"eventTimestamp ge '%s'" % start_time,
"resourceGroupName eq '%s'" % resource['name'],
"eventChannels eq 'Operation'",
"resourceType eq '%s'" % resource_type
])
# other Azure resources
else:
resource_type = resource['type']
query_filter = " and ".join([
"eventTimestamp ge '%s'" % start_time,
"resourceUri eq '%s'" % resource['id'],
"eventChannels eq 'Operation'",
"resourceType eq '%s'" % resource_type
])
# fetch activity logs
logs = self.client.activity_logs.list(
filter=query_filter,
select=self.query_select
)
# get the user who issued the first operation
operation_name = "%s/write" % resource_type
first_event = None
for l in logs:
if l.operation_name.value and l.operation_name.value.lower() == operation_name.lower():
first_event = l
resource['c7n:first_iam_event'] = first_event
return first_event
class AutoTagUser(AutoTagBase):
"""Attempts to tag a resource with the first user who created/modified it.
:example:
This policy will tag all existing resource groups with the 'CreatorEmail' tag
.. code-block:: yaml
policies:
- name: azure-auto-tag-creator
resource: azure.resourcegroup
description: |
Tag all existing resource groups with the 'CreatorEmail' tag
actions:
- type: auto-tag-user
tag: CreatorEmail
This action searches from the earliest 'write' operation's caller
in the activity logs for a particular resource.
Note: activity logs are only held for the last 90 days.
"""
schema = type_schema('auto-tag-user',
rinherit=AutoTagBase.schema,
**{
'default-claim': {'enum': ['upn', 'name']}
})
log = logging.getLogger('custodian.azure.tagging.AutoTagUser')
# compiled JMES paths
service_admin_jmes_path = jmespath.compile(constants.EVENT_GRID_SERVICE_ADMIN_JMES_PATH)
sp_jmes_path = jmespath.compile(constants.EVENT_GRID_SP_NAME_JMES_PATH)
upn_jmes_path = jmespath.compile(constants.EVENT_GRID_UPN_CLAIM_JMES_PATH)
name_jmes_path = jmespath.compile(constants.EVENT_GRID_NAME_CLAIM_JMES_PATH)
principal_role_jmes_path = jmespath.compile(constants.EVENT_GRID_PRINCIPAL_ROLE_JMES_PATH)
principal_type_jmes_path = jmespath.compile(constants.EVENT_GRID_PRINCIPAL_TYPE_JMES_PATH)
def __init__(self, data=None, manager=None, log_dir=None):
super(AutoTagUser, self).__init__(data, manager, log_dir)
self.query_select = "eventTimestamp, operationName, caller, claims"
self.default_claim = self.data.get('default-claim', 'upn')
def _get_tag_value_from_event(self, event):
principal_role = self.principal_role_jmes_path.search(event)
principal_type = self.principal_type_jmes_path.search(event)
user = None
# The Subscription Admins role does not have a principal type
if StringUtils.equal(principal_role, 'Subscription Admin'):
user = self.service_admin_jmes_path.search(event)
# ServicePrincipal type
elif StringUtils.equal(principal_type, 'ServicePrincipal'):
user = self.sp_jmes_path.search(event)
if not user:
known_claims = {'upn': self.upn_jmes_path.search(event),
'name': self.name_jmes_path.search(event)}
if known_claims[self.default_claim]:
user = known_claims[self.default_claim]
elif self.default_claim == 'upn' and known_claims['name']:
user = known_claims['name']
elif self.default_claim == 'name' and known_claims['upn']:
user = known_claims['upn']
# Last effort search for an email address in the claims
if not user:
claims = event['data'].get('claims', [])
for c in claims:
value = claims[c]
if self._is_email(value):
user = value
break
if not user:
self.log.error('Principal could not be determined.')
return user
def _is_email(self, target):
if target is None:
return False
elif parseaddr(target)[1] and '@' in target and '.' in target:
return True
else:
return False
def _get_tag_value_from_resource(self, resource):
first_op = self._get_first_event(resource).serialize(True)
return self._get_tag_value_from_event({'data': first_op})
class AutoTagDate(AutoTagBase):
"""
Attempts to tag a resource with the date when resource was created.
This action searches from the earliest 'write' operation's caller
in the activity logs for a particular resource.
Note: activity logs are only held for the last 90 days.
:example:
This policy will tag all existing resource groups with the 'CreatedDate' tag
.. code-block:: yaml
policies:
- name: azure-auto-tag-created-date
resource: azure.resourcegroup
description: |
Tag all existing resource groups with the 'CreatedDate' tag
actions:
- type: auto-tag-date
tag: CreatedDate
format: "%m-%d-%Y"
"""
schema = type_schema('auto-tag-date', rinherit=AutoTagBase.schema,
**{'format': {'type': 'string'}})
event_time_path = jmespath.compile(constants.EVENT_GRID_EVENT_TIME_PATH)
log = logging.getLogger('custodian.azure.tagging.AutoTagDate')
def __init__(self, data=None, manager=None, log_dir=None):
super(AutoTagDate, self).__init__(data, manager, log_dir)
self.format = self.data.get('format', '%m.%d.%Y')
def validate(self):
super(AutoTagDate, self).validate()
try:
datetime.datetime.now().strftime(self.format)
except Exception:
raise FilterValidationError("'%s' string has invalid datetime format." % self.format)
def _get_tag_value_from_event(self, event):
event_time = Deserializer.deserialize_iso(self.event_time_path.search(event))
return event_time.strftime(self.format)
def _get_tag_value_from_resource(self, resource):
first_op = self._get_first_event(resource)
if not first_op:
return None
return first_op.event_timestamp.strftime(self.format)
class TagTrim(AzureBaseAction):
"""Automatically remove tags from an azure resource.
Azure Resources and Resource Groups have a limit of 50 tags.
In order to make additional tag space on a set of resources,
this action can be used to remove enough tags to make the
desired amount of space while preserving a given set of tags.
Setting the space value to 0 removes all tags but those
listed to preserve.
:example:
.. code-block :: yaml
policies:
- name: azure-tag-trim
comment: |
Any instances with 49 or more tags get tags removed until
they match the target tag count, in this case 48, so
that we free up tag slots for another usage.
resource: azure.resourcegroup
filters:
# Filter down to resources that do not have the space
# to add additional required tags. For example, if an
# additional 2 tags need to be added to a resource, with
# 50 tags as the limit, then filter down to resources that
# have 49 or more tags since they will need to have tags
# removed for the 2 extra. This also ensures that metrics
# reporting is correct for the policy.
- type: value
key: "length(Tags)"
op: ge
value: 49
actions:
- type: tag-trim
space: 2
preserve:
- OwnerContact
- Environment
- downtime
- custodian_status
"""
max_tag_count = 50
schema = utils.type_schema(
'tag-trim',
space={'type': 'integer'},
preserve={'type': 'array', 'items': {'type': 'string'}})
schema_alias = True
log = logging.getLogger('custodian.azure.tagging.TagTrim')
def __init__(self, data=None, manager=None, log_dir=None):
super(TagTrim, self).__init__(data, manager, log_dir)
self.preserve = set(self.data.get('preserve', {}))
self.space = self.data.get('space', 1)
def validate(self):
if self.space < 0 or self.space > self.max_tag_count:
raise FilterValidationError("Space must be between 0 and %i" % self.max_tag_count)
return self
def _process_resource(self, resource):
tags = resource.get('tags', {})
if self.space and len(tags) + self.space <= self.max_tag_count:
return
# delete tags
keys = set(tags)
tags_to_preserve = self.preserve.intersection(keys)
candidates = keys - tags_to_preserve
if self.space:
# Free up slots to fit
remove = (len(candidates) -
(self.max_tag_count - (self.space + len(tags_to_preserve))))
candidates = list(sorted(candidates))[:remove]
if not candidates:
self.log.warning(
"Could not find any candidates to trim %s" % resource['id'])
return
TagHelper.remove_tags(self, resource, candidates)
DEFAULT_TAG = "custodian_status"
class TagDelayedAction(AzureBaseAction):
"""Tag resources for future action.
The optional 'tz' parameter can be used to adjust the clock to align
with a given timezone. The default value is 'utc'.
If neither 'days' nor 'hours' is specified, Cloud Custodian will default
to marking the resource for action 4 days in the future.
:example:
.. code-block :: yaml
policies:
- name: vm-mark-for-stop
resource: azure.vm
filters:
- type: value
key: Name
value: instance-to-stop-in-four-days
actions:
- type: mark-for-op
op: stop
"""
schema = utils.type_schema(
'mark-for-op',
tag={'type': 'string'},
msg={'type': 'string'},
days={'type': 'number', 'minimum': 0, 'exclusiveMinimum': False},
hours={'type': 'number', 'minimum': 0, 'exclusiveMinimum': False},
tz={'type': 'string'},
op={'type': 'string'})
schema_alias = True
log = logging.getLogger('custodian.azure.tagging.TagDelayed')
default_template = 'Resource does not meet policy: {op}@{action_date}'
def __init__(self, data=None, manager=None, log_dir=None):
super(TagDelayedAction, self).__init__(data, manager, log_dir)
self.tz = tzutils.gettz(
Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
days = self.data.get('days', 0)
hours = self.data.get('hours', 0)
action_date = self.generate_timestamp(days, hours)
self.tag = self.data.get('tag', DEFAULT_TAG)
self.msg = msg_tmpl.format(
op=op, action_date=action_date)
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise FilterValidationError(
"mark-for-op specifies invalid op:%s in %s" % (
op, self.manager.data))
self.tz = tzutils.gettz(
Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
if not self.tz:
raise FilterValidationError(
"Invalid timezone specified %s in %s" % (
self.tz, self.manager.data))
return self
def generate_timestamp(self, days, hours):
from c7n_azure.utils import now
n = now(tz=self.tz)
if days is None or hours is None:
# maintains default value of days being 4 if nothing is provided
days = 4
action_date = (n + datetime.timedelta(days=days, hours=hours))
if hours > 0:
action_date_string = action_date.strftime('%Y/%m/%d %H%M %Z')
else:
action_date_string = action_date.strftime('%Y/%m/%d')
return action_date_string
def _process_resource(self, resource):
tags = resource.get('tags', {})
# add new tag
tags[self.tag] = self.msg
TagHelper.update_resource_tags(self, resource, tags)
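# Illustrative result (hypothetical date): with the default tag, template and
# op='stop', the resource ends up tagged roughly as
#   custodian_status = 'Resource does not meet policy: stop@2024/01/05'
# following generate_timestamp(): '%Y/%m/%d' when hours == 0, or
# '%Y/%m/%d %H%M %Z' when an hour offset is given.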
|
|
'''
All constant data used in the package should be defined here.
'''
from collections import OrderedDict as BASE_DICT
BLENDING_TYPES = type('Blending', (), {
'NONE': 'NoBlending',
'NORMAL': 'NormalBlending',
'ADDITIVE': 'AdditiveBlending',
'SUBTRACTIVE': 'SubtractiveBlending',
'MULTIPLY': 'MultiplyBlending',
'CUSTOM': 'CustomBlending'
})
NEAREST_FILTERS = type('NearestFilters', (), {
'NEAREST': 'NearestFilter',
'MIP_MAP_NEAREST': 'NearestMipMapNearestFilter',
'MIP_MAP_LINEAR': 'NearestMipMapLinearFilter'
})
LINEAR_FILTERS = type('LinearFilters', (), {
'LINEAR': 'LinearFilter',
'MIP_MAP_NEAREST': 'LinearMipMapNearestFilter',
'MIP_MAP_LINEAR': 'LinearMipMapLinearFilter'
})
MAPPING_TYPES = type('Mapping', (), {
'UV': 'UVMapping',
'CUBE_REFLECTION': 'CubeReflectionMapping',
'CUBE_REFRACTION': 'CubeRefractionMapping',
'SPHERICAL_REFLECTION': 'SphericalReflectionMapping'
})
NUMERIC = {
'UVMapping': 300,
'CubeReflectionMapping': 301,
'CubeRefractionMapping': 302,
'EquirectangularReflectionMapping': 303,
'EquirectangularRefractionMapping': 304,
'SphericalReflectionMapping': 305,
'RepeatWrapping': 1000,
'ClampToEdgeWrapping': 1001,
'MirroredRepeatWrapping': 1002,
'NearestFilter': 1003,
'NearestMipMapNearestFilter': 1004,
'NearestMipMapLinearFilter': 1005,
'LinearFilter': 1006,
'LinearMipMapNearestFilter': 1007,
'LinearMipMapLinearFilter': 1008
}
JSON = 'json'
EXTENSION = '.%s' % JSON
INDENT = 'indent'
MATERIALS = 'materials'
SCENE = 'scene'
VERTICES = 'vertices'
FACES = 'faces'
NORMALS = 'normals'
BONES = 'bones'
UVS = 'uvs'
APPLY_MODIFIERS = 'applyModifiers'
COLORS = 'colors'
MIX_COLORS = 'mixColors'
EXTRA_VGROUPS = 'extraVertexGroups'
INDEX = 'index'
DRAW_CALLS = 'drawcalls'
DC_START = 'start'
DC_COUNT = 'count'
DC_INDEX = 'index'
SCALE = 'scale'
COMPRESSION = 'compression'
MAPS = 'maps'
FRAME_STEP = 'frameStep'
FRAME_INDEX_AS_TIME = 'frameIndexAsTime'
ANIMATION = 'animations'
CLIPS="clips"
KEYFRAMES = 'tracks'
MORPH_TARGETS = 'morphTargets'
MORPH_TARGETS_ANIM = 'morphTargetsAnimation'
BLEND_SHAPES = 'blendShapes'
POSE = 'pose'
REST = 'rest'
SKIN_INDICES = 'skinIndices'
SKIN_WEIGHTS = 'skinWeights'
LOGGING = 'logging'
CAMERAS = 'cameras'
LIGHTS = 'lights'
HIERARCHY = 'hierarchy'
FACE_MATERIALS = 'faceMaterials'
SKINNING = 'skinning'
EXPORT_TEXTURES = 'exportTextures'
EMBED_TEXTURES = 'embedTextures'
TEXTURE_FOLDER = 'textureFolder'
ENABLE_PRECISION = 'enablePrecision'
PRECISION = 'precision'
DEFAULT_PRECISION = 6
CUSTOM_PROPERTIES = 'customProperties'
EMBED_GEOMETRY = 'embedGeometry'
EMBED_ANIMATION = 'embedAnimation'
OFF = 'off'
GLOBAL = 'global'
BUFFER_GEOMETRY = 'BufferGeometry'
GEOMETRY = 'geometry'
GEOMETRY_TYPE = 'geometryType'
INDEX_TYPE = 'indexType'
CRITICAL = 'critical'
ERROR = 'error'
WARNING = 'warning'
INFO = 'info'
DEBUG = 'debug'
DISABLED = 'disabled'
NONE = 'None'
MSGPACK = 'msgpack'
PACK = 'pack'
FLOAT_32 = 'Float32Array'
UINT_16 = 'Uint16Array'
UINT_32 = 'Uint32Array'
INFLUENCES_PER_VERTEX = 'influencesPerVertex'
EXPORT_OPTIONS = {
FACES: True,
VERTICES: True,
NORMALS: True,
UVS: True,
APPLY_MODIFIERS: True,
COLORS: False,
EXTRA_VGROUPS: '',
INDEX_TYPE: UINT_16,
MATERIALS: False,
FACE_MATERIALS: False,
SCALE: 1,
FRAME_STEP: 1,
FRAME_INDEX_AS_TIME: False,
SCENE: False,
MIX_COLORS: False,
COMPRESSION: None,
MAPS: False,
ANIMATION: OFF,
KEYFRAMES: False,
BONES: False,
SKINNING: False,
MORPH_TARGETS: False,
BLEND_SHAPES: False,
CAMERAS: False,
LIGHTS: False,
HIERARCHY: False,
EXPORT_TEXTURES: True,
EMBED_TEXTURES: False,
TEXTURE_FOLDER: '',
LOGGING: DEBUG,
ENABLE_PRECISION: True,
PRECISION: DEFAULT_PRECISION,
CUSTOM_PROPERTIES: False,
EMBED_GEOMETRY: True,
EMBED_ANIMATION: True,
GEOMETRY_TYPE: GEOMETRY,
INFLUENCES_PER_VERTEX: 2,
INDENT: True
}
FORMAT_VERSION = 4.4
VERSION = 'version'
THREE = 'io_three'
GENERATOR = 'generator'
SOURCE_FILE = 'sourceFile'
VALID_DATA_TYPES = (str, int, float, bool, list, tuple, dict)
JSON = 'json'
GZIP = 'gzip'
EXTENSIONS = {
JSON: '.json',
MSGPACK: '.pack',
GZIP: '.gz'
}
METADATA = 'metadata'
GEOMETRIES = 'geometries'
IMAGES = 'images'
TEXTURE = 'texture'
TEXTURES = 'textures'
USER_DATA = 'userData'
DATA = 'data'
TYPE = 'type'
MATERIAL = 'material'
OBJECT = 'object'
PERSPECTIVE_CAMERA = 'PerspectiveCamera'
ORTHOGRAPHIC_CAMERA = 'OrthographicCamera'
AMBIENT_LIGHT = 'AmbientLight'
DIRECTIONAL_LIGHT = 'DirectionalLight'
POINT_LIGHT = 'PointLight'
SPOT_LIGHT = 'SpotLight'
HEMISPHERE_LIGHT = 'HemisphereLight'
MESH = 'Mesh'
EMPTY = 'Empty'
SPRITE = 'Sprite'
DEFAULT_METADATA = {
VERSION: FORMAT_VERSION,
TYPE: OBJECT.title(),
GENERATOR: THREE
}
UUID = 'uuid'
MATRIX = 'matrix'
POSITION = 'position'
QUATERNION = 'quaternion'
ROTATION = 'rotation'
SCALE = 'scale'
UV = 'uv'
UV2 = 'uv2'
ATTRIBUTES = 'attributes'
NORMAL = 'normal'
ITEM_SIZE = 'itemSize'
ARRAY = 'array'
FLOAT_32 = 'Float32Array'
VISIBLE = 'visible'
CAST_SHADOW = 'castShadow'
RECEIVE_SHADOW = 'receiveShadow'
QUAD = 'quad'
USER_DATA = 'userData'
MASK = {
QUAD: 0,
MATERIALS: 1,
UVS: 3,
NORMALS: 5,
COLORS: 7
}
CHILDREN = 'children'
URL = 'url'
WRAP = 'wrap'
REPEAT = 'repeat'
WRAPPING = type('Wrapping', (), {
'REPEAT': 'RepeatWrapping',
'CLAMP': 'ClampToEdgeWrapping',
'MIRROR': 'MirroredRepeatWrapping'
})
ANISOTROPY = 'anisotropy'
MAG_FILTER = 'magFilter'
MIN_FILTER = 'minFilter'
MAPPING = 'mapping'
IMAGE = 'image'
NAME = 'name'
PARENT = 'parent'
LENGTH = 'length'
FPS = 'fps'
HIERARCHY = 'hierarchy'
POS = 'pos'
ROTQ = 'rotq'
ROT = 'rot'
SCL = 'scl'
TIME = 'time'
KEYS = 'keys'
COLOR = 'color'
EMISSIVE = 'emissive'
SPECULAR = 'specular'
SPECULAR_COEF = 'specularCoef'
SHININESS = 'shininess'
SIDE = 'side'
OPACITY = 'opacity'
TRANSPARENT = 'transparent'
WIREFRAME = 'wireframe'
BLENDING = 'blending'
VERTEX_COLORS = 'vertexColors'
DEPTH_WRITE = 'depthWrite'
DEPTH_TEST = 'depthTest'
MAP = 'map'
SPECULAR_MAP = 'specularMap'
LIGHT_MAP = 'lightMap'
BUMP_MAP = 'bumpMap'
BUMP_SCALE = 'bumpScale'
NORMAL_MAP = 'normalMap'
NORMAL_SCALE = 'normalScale'
#@TODO ENV_MAP, REFLECTIVITY, REFRACTION_RATIO, COMBINE
MAP_DIFFUSE = 'mapDiffuse'
MAP_DIFFUSE_REPEAT = 'mapDiffuseRepeat'
MAP_DIFFUSE_WRAP = 'mapDiffuseWrap'
MAP_DIFFUSE_ANISOTROPY = 'mapDiffuseAnisotropy'
MAP_SPECULAR = 'mapSpecular'
MAP_SPECULAR_REPEAT = 'mapSpecularRepeat'
MAP_SPECULAR_WRAP = 'mapSpecularWrap'
MAP_SPECULAR_ANISOTROPY = 'mapSpecularAnisotropy'
MAP_LIGHT = 'mapLight'
MAP_LIGHT_REPEAT = 'mapLightRepeat'
MAP_LIGHT_WRAP = 'mapLightWrap'
MAP_LIGHT_ANISOTROPY = 'mapLightAnisotropy'
MAP_NORMAL = 'mapNormal'
MAP_NORMAL_FACTOR = 'mapNormalFactor'
MAP_NORMAL_REPEAT = 'mapNormalRepeat'
MAP_NORMAL_WRAP = 'mapNormalWrap'
MAP_NORMAL_ANISOTROPY = 'mapNormalAnisotropy'
MAP_BUMP = 'mapBump'
MAP_BUMP_REPEAT = 'mapBumpRepeat'
MAP_BUMP_WRAP = 'mapBumpWrap'
MAP_BUMP_ANISOTROPY = 'mapBumpAnisotropy'
MAP_BUMP_SCALE = 'mapBumpScale'
NORMAL_BLENDING = 0
VERTEX_COLORS_ON = 2
VERTEX_COLORS_OFF = 0
SIDE_DOUBLE = 2
THREE_BASIC = 'MeshBasicMaterial'
THREE_LAMBERT = 'MeshLambertMaterial'
THREE_PHONG = 'MeshPhongMaterial'
INTENSITY = 'intensity'
DISTANCE = 'distance'
ASPECT = 'aspect'
ANGLE = 'angle'
DECAY = 'decayExponent'
FOV = 'fov'
ASPECT = 'aspect'
NEAR = 'near'
FAR = 'far'
LEFT = 'left'
RIGHT = 'right'
TOP = 'top'
BOTTOM = 'bottom'
SHADING = 'shading'
COLOR_DIFFUSE = 'colorDiffuse'
COLOR_EMISSIVE = 'colorEmissive'
COLOR_SPECULAR = 'colorSpecular'
DBG_NAME = 'DbgName'
DBG_COLOR = 'DbgColor'
DBG_INDEX = 'DbgIndex'
EMIT = 'emit'
PHONG = 'phong'
LAMBERT = 'lambert'
BASIC = 'basic'
NORMAL_BLENDING = 'NormalBlending'
DBG_COLORS = (0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee,
0xeeee00, 0x00eeee, 0xee00ee)
DOUBLE_SIDED = 'doubleSided'
EXPORT_SETTINGS_KEY = 'threeExportSettings'
|
|
import collections
import itertools
import math
import numbers
import numpy as np
from zarr.errors import (
ArrayIndexError,
NegativeStepError,
err_too_many_indices,
VindexInvalidSelectionError,
BoundsCheckError,
)
def is_integer(x):
"""True if x is an integer (both pure Python or NumPy).
Note that Python's bool is considered an integer too.
"""
return isinstance(x, numbers.Integral)
def is_integer_list(x):
"""True if x is a list of integers.
This function assumes, i.e. *does not check*, that all elements of the list
have the same type. Mixed-type lists will result in other errors that will
bubble up anyway.
"""
return isinstance(x, list) and len(x) > 0 and is_integer(x[0])
def is_integer_array(x, ndim=None):
t = not np.isscalar(x) and \
hasattr(x, 'shape') and \
hasattr(x, 'dtype') and \
x.dtype.kind in 'ui'
if ndim is not None:
t = t and len(x.shape) == ndim
return t
def is_bool_array(x, ndim=None):
t = hasattr(x, 'shape') and hasattr(x, 'dtype') and x.dtype == bool
if ndim is not None:
t = t and len(x.shape) == ndim
return t
def is_scalar(value, dtype):
if np.isscalar(value):
return True
if isinstance(value, tuple) and dtype.names and len(value) == len(dtype.names):
return True
return False
def is_pure_fancy_indexing(selection, ndim):
"""Check whether a selection contains only scalars or integer array-likes.
Parameters
----------
selection : tuple, slice, or scalar
A valid selection value for indexing into arrays.
ndim : int
Number of dimensions of the array being indexed.
Returns
-------
is_pure : bool
True if the selection is a pure fancy indexing expression (i.e. not mixed
with boolean arrays or slices).
"""
if ndim == 1:
if is_integer_list(selection) or is_integer_array(selection):
return True
# if not, we go through the normal path below, because a 1-tuple
# of integers is also allowed.
no_slicing = (
isinstance(selection, tuple)
and len(selection) == ndim
and not (
any(isinstance(elem, slice) or elem is Ellipsis
for elem in selection)
)
)
return (
no_slicing and
all(
is_integer(elem)
or is_integer_list(elem)
or is_integer_array(elem)
for elem in selection
) and
any(
is_integer_list(elem)
or is_integer_array(elem)
for elem in selection
)
)
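# A few concrete cases (illustrative):
#   is_pure_fancy_indexing([1, 2, 3], ndim=1)        -> True
#   is_pure_fancy_indexing((0, [1, 2]), ndim=2)      -> True  (int + integer list)
#   is_pure_fancy_indexing((0, slice(None)), ndim=2) -> False (slice present)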
def normalize_integer_selection(dim_sel, dim_len):
# normalize type to int
dim_sel = int(dim_sel)
# handle wraparound
if dim_sel < 0:
dim_sel = dim_len + dim_sel
# handle out of bounds
if dim_sel >= dim_len or dim_sel < 0:
raise BoundsCheckError(dim_len)
return dim_sel
ChunkDimProjection = collections.namedtuple(
'ChunkDimProjection',
('dim_chunk_ix', 'dim_chunk_sel', 'dim_out_sel')
)
"""A mapping from chunk to output array for a single dimension.
Parameters
----------
dim_chunk_ix
Index of chunk.
dim_chunk_sel
Selection of items from chunk array.
dim_out_sel
Selection of items in target (output) array.
"""
class IntDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# normalize
dim_sel = normalize_integer_selection(dim_sel, dim_len)
# store attributes
self.dim_sel = dim_sel
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nitems = 1
def __iter__(self):
dim_chunk_ix = self.dim_sel // self.dim_chunk_len
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel - dim_offset
dim_out_sel = None
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
def ceildiv(a, b):
return math.ceil(a / b)
class SliceDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# normalize
self.start, self.stop, self.step = dim_sel.indices(dim_len)
if self.step < 1:
raise NegativeStepError()
# store attributes
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nitems = max(0, ceildiv((self.stop - self.start), self.step))
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
def __iter__(self):
# figure out the range of chunks we need to visit
dim_chunk_ix_from = self.start // self.dim_chunk_len
dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len)
# iterate over chunks in range
for dim_chunk_ix in range(dim_chunk_ix_from, dim_chunk_ix_to):
# compute offsets for chunk within overall array
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_limit = min(self.dim_len, (dim_chunk_ix + 1) * self.dim_chunk_len)
# determine chunk length, accounting for trailing chunk
dim_chunk_len = dim_limit - dim_offset
if self.start < dim_offset:
# selection starts before current chunk
dim_chunk_sel_start = 0
remainder = (dim_offset - self.start) % self.step
if remainder:
dim_chunk_sel_start += self.step - remainder
# compute number of previous items, provides offset into output array
dim_out_offset = ceildiv((dim_offset - self.start), self.step)
else:
# selection starts within current chunk
dim_chunk_sel_start = self.start - dim_offset
dim_out_offset = 0
if self.stop > dim_limit:
# selection ends after current chunk
dim_chunk_sel_stop = dim_chunk_len
else:
# selection ends within current chunk
dim_chunk_sel_stop = self.stop - dim_offset
dim_chunk_sel = slice(dim_chunk_sel_start, dim_chunk_sel_stop, self.step)
dim_chunk_nitems = ceildiv((dim_chunk_sel_stop - dim_chunk_sel_start),
self.step)
dim_out_sel = slice(dim_out_offset, dim_out_offset + dim_chunk_nitems)
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
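# Worked example (illustrative): SliceDimIndexer(slice(2, 9), dim_len=10,
# dim_chunk_len=4) yields
#   ChunkDimProjection(0, slice(2, 4, 1), slice(0, 2))
#   ChunkDimProjection(1, slice(0, 4, 1), slice(2, 6))
#   ChunkDimProjection(2, slice(0, 1, 1), slice(6, 7))
# i.e. 2 + 4 + 1 = 7 items in total, matching nitems for slice(2, 9).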
def check_selection_length(selection, shape):
if len(selection) > len(shape):
err_too_many_indices(selection, shape)
def replace_ellipsis(selection, shape):
selection = ensure_tuple(selection)
# count number of ellipsis present
n_ellipsis = sum(1 for i in selection if i is Ellipsis)
if n_ellipsis > 1:
# more than 1 is an error
raise IndexError("an index can only have a single ellipsis ('...')")
elif n_ellipsis == 1:
# locate the ellipsis, count how many items to left and right
n_items_l = selection.index(Ellipsis) # items to left of ellipsis
n_items_r = len(selection) - (n_items_l + 1) # items to right of ellipsis
n_items = len(selection) - 1 # all non-ellipsis items
if n_items >= len(shape):
# ellipsis does nothing, just remove it
selection = tuple(i for i in selection if i != Ellipsis)
else:
# replace ellipsis with as many slices are needed for number of dims
new_item = selection[:n_items_l] + ((slice(None),) * (len(shape) - n_items))
if n_items_r:
new_item += selection[-n_items_r:]
selection = new_item
# fill out selection if not completely specified
if len(selection) < len(shape):
selection += (slice(None),) * (len(shape) - len(selection))
# check selection not too long
check_selection_length(selection, shape)
return selection
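# Example (illustrative): for shape (4, 5, 6, 7),
#   replace_ellipsis((0, Ellipsis, slice(None)), (4, 5, 6, 7))
# expands the ellipsis into full slices and returns
#   (0, slice(None), slice(None), slice(None)).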
def replace_lists(selection):
return tuple(
np.asarray(dim_sel) if isinstance(dim_sel, list) else dim_sel
for dim_sel in selection
)
def ensure_tuple(v):
if not isinstance(v, tuple):
v = (v,)
return v
ChunkProjection = collections.namedtuple(
'ChunkProjection',
('chunk_coords', 'chunk_selection', 'out_selection')
)
"""A mapping of items from chunk to output array. Can be used to extract items from the
chunk array for loading into an output array. Can also be used to extract items from a
value array for setting/updating in a chunk array.
Parameters
----------
chunk_coords
Indices of chunk.
chunk_selection
Selection of items from chunk array.
out_selection
Selection of items in target (output) array.
"""
def is_slice(s):
return isinstance(s, slice)
def is_contiguous_slice(s):
return is_slice(s) and (s.step is None or s.step == 1)
def is_positive_slice(s):
return is_slice(s) and (s.step is None or s.step >= 1)
def is_contiguous_selection(selection):
selection = ensure_tuple(selection)
return all(
(is_integer_array(s) or is_contiguous_slice(s) or s == Ellipsis)
for s in selection
)
def is_basic_selection(selection):
selection = ensure_tuple(selection)
return all(is_integer(s) or is_positive_slice(s) for s in selection)
# noinspection PyProtectedMember
class BasicIndexer:
def __init__(self, selection, array):
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in \
zip(selection, array._shape, array._chunks):
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_slice(dim_sel):
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
else:
raise IndexError('unsupported selection item for basic indexing; '
'expected integer or slice, got {!r}'
.format(type(dim_sel)))
dim_indexers.append(dim_indexer)
self.dim_indexers = dim_indexers
self.shape = tuple(s.nitems for s in self.dim_indexers
if not isinstance(s, IntDimIndexer))
self.drop_axes = None
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(p.dim_out_sel for p in dim_projections
if p.dim_out_sel is not None)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
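# Minimal sketch (not used by the module above): shows how BasicIndexer
# decomposes a selection into per-chunk projections; _FakeArray and the
# example values are hypothetical stand-ins for a real zarr array.
def _basic_indexer_example():
    """Decompose slice(2, 9) over a 1-d array of length 10 chunked by 4."""
    class _FakeArray:
        _shape = (10,)
        _chunks = (4,)
    indexer = BasicIndexer((slice(2, 9),), _FakeArray())
    # Each ChunkProjection pairs a chunk's coordinates with the selection to
    # apply inside that chunk and the slice of the output array it fills,
    # e.g. ChunkProjection((0,), (slice(2, 4, 1),), (slice(0, 2),)).
    return list(indexer)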
class BoolArrayDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
# check number of dimensions
if not is_bool_array(dim_sel, 1):
raise IndexError('Boolean arrays in an orthogonal selection must '
'be 1-dimensional only')
# check shape
if dim_sel.shape[0] != dim_len:
raise IndexError('Boolean array has the wrong length for dimension; '
'expected {}, got {}'.format(dim_len, dim_sel.shape[0]))
# store attributes
self.dim_sel = dim_sel
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
# precompute number of selected items for each chunk
self.chunk_nitems = np.zeros(self.nchunks, dtype='i8')
for dim_chunk_ix in range(self.nchunks):
dim_offset = dim_chunk_ix * self.dim_chunk_len
self.chunk_nitems[dim_chunk_ix] = np.count_nonzero(
self.dim_sel[dim_offset:dim_offset + self.dim_chunk_len]
)
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
self.nitems = self.chunk_nitems_cumsum[-1]
self.dim_chunk_ixs = np.nonzero(self.chunk_nitems)[0]
def __iter__(self):
# iterate over chunks with at least one item
for dim_chunk_ix in self.dim_chunk_ixs:
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[dim_offset:dim_offset + self.dim_chunk_len]
# pad out if final chunk
if dim_chunk_sel.shape[0] < self.dim_chunk_len:
tmp = np.zeros(self.dim_chunk_len, dtype=bool)
tmp[:dim_chunk_sel.shape[0]] = dim_chunk_sel
dim_chunk_sel = tmp
# find region in output
if dim_chunk_ix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[dim_chunk_ix - 1]
stop = self.chunk_nitems_cumsum[dim_chunk_ix]
dim_out_sel = slice(start, stop)
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
class Order:
UNKNOWN = 0
INCREASING = 1
DECREASING = 2
UNORDERED = 3
@staticmethod
def check(a):
diff = np.diff(a)
diff_positive = diff >= 0
n_diff_positive = np.count_nonzero(diff_positive)
all_increasing = n_diff_positive == len(diff_positive)
any_increasing = n_diff_positive > 0
if all_increasing:
order = Order.INCREASING
elif any_increasing:
order = Order.UNORDERED
else:
order = Order.DECREASING
return order
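# Examples (illustrative):
#   Order.check([1, 3, 5]) -> Order.INCREASING
#   Order.check([5, 3, 1]) -> Order.DECREASING
#   Order.check([1, 5, 3]) -> Order.UNORDERED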
def wraparound_indices(x, dim_len):
loc_neg = x < 0
if np.any(loc_neg):
x[loc_neg] = x[loc_neg] + dim_len
def boundscheck_indices(x, dim_len):
if np.any(x < 0) or np.any(x >= dim_len):
raise BoundsCheckError(dim_len)
class IntArrayDimIndexer:
"""Integer array selection against a single dimension."""
def __init__(self, dim_sel, dim_len, dim_chunk_len, wraparound=True, boundscheck=True,
order=Order.UNKNOWN):
# ensure 1d array
dim_sel = np.asanyarray(dim_sel)
if not is_integer_array(dim_sel, 1):
raise IndexError('integer arrays in an orthogonal selection must be '
'1-dimensional only')
# handle wraparound
if wraparound:
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
if boundscheck:
boundscheck_indices(dim_sel, dim_len)
# store attributes
self.dim_len = dim_len
self.dim_chunk_len = dim_chunk_len
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
self.nitems = len(dim_sel)
# determine which chunk is needed for each selection item
# note: for dense integer selections, the division operation here is the
# bottleneck
dim_sel_chunk = dim_sel // dim_chunk_len
# determine order of indices
if order == Order.UNKNOWN:
order = Order.check(dim_sel)
self.order = order
if self.order == Order.INCREASING:
self.dim_sel = dim_sel
self.dim_out_sel = None
elif self.order == Order.DECREASING:
self.dim_sel = dim_sel[::-1]
# TODO should be possible to do this without creating an arange
self.dim_out_sel = np.arange(self.nitems - 1, -1, -1)
else:
# sort indices to group by chunk
self.dim_out_sel = np.argsort(dim_sel_chunk)
self.dim_sel = np.take(dim_sel, self.dim_out_sel)
# precompute number of selected items for each chunk
self.chunk_nitems = np.bincount(dim_sel_chunk, minlength=self.nchunks)
# find chunks that we need to visit
self.dim_chunk_ixs = np.nonzero(self.chunk_nitems)[0]
# compute offsets into the output array
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
def __iter__(self):
for dim_chunk_ix in self.dim_chunk_ixs:
# find region in output
if dim_chunk_ix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[dim_chunk_ix - 1]
stop = self.chunk_nitems_cumsum[dim_chunk_ix]
if self.order == Order.INCREASING:
dim_out_sel = slice(start, stop)
else:
dim_out_sel = self.dim_out_sel[start:stop]
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[start:stop] - dim_offset
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel)
def slice_to_range(s: slice, l: int): # noqa: E741
return range(*s.indices(l))
def ix_(selection, shape):
"""Convert an orthogonal selection to a numpy advanced (fancy) selection, like numpy.ix_
but with support for slices and single ints."""
# normalisation
selection = replace_ellipsis(selection, shape)
# replace slice and int as these are not supported by numpy.ix_
selection = [slice_to_range(dim_sel, dim_len) if isinstance(dim_sel, slice)
else [dim_sel] if is_integer(dim_sel)
else dim_sel
for dim_sel, dim_len in zip(selection, shape)]
# now get numpy to convert to a coordinate selection
selection = np.ix_(*selection)
return selection
def oindex(a, selection):
"""Implementation of orthogonal indexing with slices and ints."""
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
result = a[selection]
if drop_axes:
result = result.squeeze(axis=drop_axes)
return result
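# Example (illustrative): for a = np.arange(12).reshape(3, 4),
#   oindex(a, (slice(0, 2), [0, 2]))
# selects rows 0-1 and columns 0 and 2 orthogonally, returning
#   array([[0, 2],
#          [4, 6]])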
def oindex_set(a, selection, value):
selection = replace_ellipsis(selection, a.shape)
drop_axes = tuple([i for i, s in enumerate(selection) if is_integer(s)])
selection = ix_(selection, a.shape)
if not np.isscalar(value) and drop_axes:
value = np.asanyarray(value)
value_selection = [slice(None)] * len(a.shape)
for i in drop_axes:
value_selection[i] = np.newaxis
value_selection = tuple(value_selection)
value = value[value_selection]
a[selection] = value
# noinspection PyProtectedMember
class OrthogonalIndexer:
def __init__(self, selection, array):
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
# normalize list to array
selection = replace_lists(selection)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in \
zip(selection, array._shape, array._chunks):
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif isinstance(dim_sel, slice):
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_integer_array(dim_sel):
dim_indexer = IntArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_bool_array(dim_sel):
dim_indexer = BoolArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
else:
raise IndexError('unsupported selection item for orthogonal indexing; '
'expected integer, slice, integer array or Boolean '
'array, got {!r}'
.format(type(dim_sel)))
dim_indexers.append(dim_indexer)
self.array = array
self.dim_indexers = dim_indexers
self.shape = tuple(s.nitems for s in self.dim_indexers
if not isinstance(s, IntDimIndexer))
self.is_advanced = not is_basic_selection(selection)
if self.is_advanced:
self.drop_axes = tuple([i for i, dim_indexer in enumerate(self.dim_indexers)
if isinstance(dim_indexer, IntDimIndexer)])
else:
self.drop_axes = None
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(p.dim_out_sel for p in dim_projections
if p.dim_out_sel is not None)
# handle advanced indexing arrays orthogonally
if self.is_advanced:
# N.B., numpy doesn't support orthogonal indexing directly as yet,
# so need to work around via np.ix_. Also np.ix_ does not support a
# mixture of arrays and slices or integers, so need to convert slices
# and integers into ranges.
chunk_selection = ix_(chunk_selection, self.array._chunks)
# special case for non-monotonic indices
if not is_basic_selection(out_selection):
out_selection = ix_(out_selection, self.shape)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
class OIndex:
def __init__(self, array):
self.array = array
def __getitem__(self, selection):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
return self.array.get_orthogonal_selection(selection, fields=fields)
def __setitem__(self, selection, value):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
return self.array.set_orthogonal_selection(selection, value, fields=fields)
# noinspection PyProtectedMember
def is_coordinate_selection(selection, array):
return (
(len(selection) == len(array._shape)) and
all(is_integer(dim_sel) or is_integer_array(dim_sel)
for dim_sel in selection)
)
# noinspection PyProtectedMember
def is_mask_selection(selection, array):
return (
len(selection) == 1 and
is_bool_array(selection[0]) and
selection[0].shape == array._shape
)
# noinspection PyProtectedMember
class CoordinateIndexer:
def __init__(self, selection, array):
# some initial normalization
selection = ensure_tuple(selection)
selection = tuple([i] if is_integer(i) else i for i in selection)
selection = replace_lists(selection)
# validation
if not is_coordinate_selection(selection, array):
raise IndexError('invalid coordinate selection; expected one integer '
'(coordinate) array per dimension of the target array, '
'got {!r}'.format(selection))
# handle wraparound, boundscheck
for dim_sel, dim_len in zip(selection, array.shape):
# handle wraparound
wraparound_indices(dim_sel, dim_len)
# handle out of bounds
boundscheck_indices(dim_sel, dim_len)
# compute chunk index for each point in the selection
chunks_multi_index = tuple(
dim_sel // dim_chunk_len
for (dim_sel, dim_chunk_len) in zip(selection, array._chunks)
)
# broadcast selection - this will raise error if array dimensions don't match
selection = np.broadcast_arrays(*selection)
chunks_multi_index = np.broadcast_arrays(*chunks_multi_index)
# remember shape of selection, because we will flatten indices for processing
self.sel_shape = selection[0].shape if selection[0].shape else (1,)
# flatten selection
selection = [dim_sel.reshape(-1) for dim_sel in selection]
chunks_multi_index = [dim_chunks.reshape(-1) for dim_chunks in chunks_multi_index]
# ravel chunk indices
chunks_raveled_indices = np.ravel_multi_index(chunks_multi_index,
dims=array._cdata_shape)
# group points by chunk
if np.any(np.diff(chunks_raveled_indices) < 0):
# optimisation, only sort if needed
sel_sort = np.argsort(chunks_raveled_indices)
selection = tuple(dim_sel[sel_sort] for dim_sel in selection)
else:
sel_sort = None
# store attributes
self.selection = selection
self.sel_sort = sel_sort
self.shape = selection[0].shape if selection[0].shape else (1,)
self.drop_axes = None
self.array = array
# precompute number of selected items for each chunk
self.chunk_nitems = np.bincount(chunks_raveled_indices, minlength=array.nchunks)
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
# locate the chunks we need to process
self.chunk_rixs = np.nonzero(self.chunk_nitems)[0]
# unravel chunk indices
self.chunk_mixs = np.unravel_index(self.chunk_rixs, array._cdata_shape)
def __iter__(self):
# iterate over chunks
for i, chunk_rix in enumerate(self.chunk_rixs):
chunk_coords = tuple(m[i] for m in self.chunk_mixs)
if chunk_rix == 0:
start = 0
else:
start = self.chunk_nitems_cumsum[chunk_rix - 1]
stop = self.chunk_nitems_cumsum[chunk_rix]
if self.sel_sort is None:
out_selection = slice(start, stop)
else:
out_selection = self.sel_sort[start:stop]
chunk_offsets = tuple(
dim_chunk_ix * dim_chunk_len
for dim_chunk_ix, dim_chunk_len in zip(chunk_coords, self.array._chunks)
)
chunk_selection = tuple(
dim_sel[start:stop] - dim_chunk_offset
for (dim_sel, dim_chunk_offset) in zip(self.selection, chunk_offsets)
)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection)
# noinspection PyProtectedMember
class MaskIndexer(CoordinateIndexer):
def __init__(self, selection, array):
# some initial normalization
selection = ensure_tuple(selection)
selection = replace_lists(selection)
# validation
if not is_mask_selection(selection, array):
raise IndexError('invalid mask selection; expected one Boolean (mask) '
'array with the same shape as the target array, got {!r}'
.format(selection))
# convert to indices
selection = np.nonzero(selection[0])
# delegate the rest to superclass
super().__init__(selection, array)
class VIndex:
def __init__(self, array):
self.array = array
def __getitem__(self, selection):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
if is_coordinate_selection(selection, self.array):
return self.array.get_coordinate_selection(selection, fields=fields)
elif is_mask_selection(selection, self.array):
return self.array.get_mask_selection(selection, fields=fields)
else:
raise VindexInvalidSelectionError(selection)
def __setitem__(self, selection, value):
fields, selection = pop_fields(selection)
selection = ensure_tuple(selection)
selection = replace_lists(selection)
if is_coordinate_selection(selection, self.array):
self.array.set_coordinate_selection(selection, value, fields=fields)
elif is_mask_selection(selection, self.array):
self.array.set_mask_selection(selection, value, fields=fields)
else:
raise VindexInvalidSelectionError(selection)
def check_fields(fields, dtype):
# early out
if fields is None:
return dtype
# check type
if not isinstance(fields, (str, list, tuple)):
raise IndexError("'fields' argument must be a string or list of strings; found "
"{!r}".format(type(fields)))
if fields:
if dtype.names is None:
raise IndexError("invalid 'fields' argument, array does not have any fields")
try:
if isinstance(fields, str):
# single field selection
out_dtype = dtype[fields]
else:
# multiple field selection
out_dtype = np.dtype([(f, dtype[f]) for f in fields])
except KeyError as e:
raise IndexError("invalid 'fields' argument, field not found: {!r}".format(e))
else:
return out_dtype
else:
return dtype
def check_no_multi_fields(fields):
if isinstance(fields, list):
if len(fields) == 1:
return fields[0]
elif len(fields) > 1:
raise IndexError('multiple fields are not supported for this operation')
return fields
def pop_fields(selection):
if isinstance(selection, str):
# single field selection
fields = selection
selection = ()
elif not isinstance(selection, tuple):
# single selection item, no fields
fields = None
# leave selection as-is
else:
# multiple items, split fields from selection items
fields = [f for f in selection if isinstance(f, str)]
fields = fields[0] if len(fields) == 1 else fields
selection = tuple(s for s in selection if not isinstance(s, str))
selection = selection[0] if len(selection) == 1 else selection
return fields, selection
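# Examples (illustrative):
#   pop_fields('foo')                      -> ('foo', ())
#   pop_fields((slice(0, 2), 'foo'))       -> ('foo', slice(0, 2))
#   pop_fields((slice(0, 2), slice(None))) -> ([], (slice(0, 2), slice(None)))
# (an empty fields list is treated downstream the same as no fields)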
def make_slice_selection(selection):
ls = []
for dim_selection in selection:
if is_integer(dim_selection):
ls.append(slice(int(dim_selection), int(dim_selection) + 1, 1))
elif isinstance(dim_selection, np.ndarray):
if len(dim_selection) == 1:
ls.append(
slice(
int(dim_selection[0]), int(dim_selection[0]) + 1, 1
)
)
else:
raise ArrayIndexError()
else:
ls.append(dim_selection)
return ls
class PartialChunkIterator:
"""Iterator to retrieve the specific coordinates of requested data
from within a compressed chunk.
Parameters
----------
selection : tuple
tuple of slice objects to take from the chunk
arr_shape : shape of chunk to select data from
Attributes
-----------
arr_shape
selection
Returns
-------
Tuple with 3 elements:
start: int
elements offset in the chunk to read from
nitems: int
number of elements to read in the chunk from start
partial_out_selection: list of slices
indices of a temporary empty array of size `Array._chunks` to assign
the decompressed data to after the partial read.
Notes
-----
An array is flattened when compressed with blosc, so this iterator takes
the requested selection of an array and determines the corresponding coordinates
of the flattened, compressed data to be read and then decompressed. The
decompressed data is then placed in a temporary empty array of size
`Array._chunks` at the indices yielded as partial_out_selection.
Once all the slices yielded by this iterator have been read, decompressed
and written to the temporary array, the wanted slice of the chunk can be
indexed from the temporary array and written to the out_selection slice
of the out array.
"""
def __init__(self, selection, arr_shape):
selection = make_slice_selection(selection)
self.arr_shape = arr_shape
# number of selection dimensions can't be greater than the number of chunk dimensions
if len(selection) > len(self.arr_shape):
raise ValueError(
"Selection has more dimensions then the array:\n"
f"selection dimensions = {len(selection)}\n"
f"array dimensions = {len(self.arr_shape)}"
)
# no part of the selection may fall outside the range of the chunk
selection_shape = np.empty(self.arr_shape)[tuple(selection)].shape
if any(
selection_dim < 0 or selection_dim > arr_dim
for selection_dim, arr_dim in zip(selection_shape, self.arr_shape)
):
raise IndexError(
"a selection index is out of range for the dimension"
) # pragma: no cover
for i, dim_size in enumerate(self.arr_shape[::-1]):
index = len(self.arr_shape) - (i + 1)
if index <= len(selection) - 1:
slice_size = selection_shape[index]
if slice_size == dim_size and index > 0:
selection.pop()
else:
break
chunk_loc_slices = []
last_dim_slice = None if selection[-1].step > 1 else selection.pop()
for arr_shape_i, sl in zip(arr_shape, selection):
dim_chunk_loc_slices = []
assert isinstance(sl, slice)
for x in slice_to_range(sl, arr_shape_i):
dim_chunk_loc_slices.append(slice(x, x + 1, 1))
chunk_loc_slices.append(dim_chunk_loc_slices)
if last_dim_slice:
chunk_loc_slices.append([last_dim_slice])
self.chunk_loc_slices = list(itertools.product(*chunk_loc_slices))
def __iter__(self):
chunk1 = self.chunk_loc_slices[0]
nitems = (chunk1[-1].stop - chunk1[-1].start) * np.prod(
self.arr_shape[len(chunk1):], dtype=int
)
for partial_out_selection in self.chunk_loc_slices:
start = 0
for i, sl in enumerate(partial_out_selection):
start += sl.start * np.prod(self.arr_shape[i + 1:], dtype=int)
yield start, nitems, partial_out_selection
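# Worked example (illustrative): for a chunk of shape (4, 4) and the selection
# (slice(0, 2, 1), slice(0, 3, 1)), iterating yields
#   (0, 3, (slice(0, 1, 1), slice(0, 3, 1)))
#   (4, 3, (slice(1, 2, 1), slice(0, 3, 1)))
# i.e. two partial reads of 3 contiguous elements each, starting at flat
# offsets 0 and 4 within the flattened chunk.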
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" Compare the output of various Kiva SVG implementations against other
renderers.
"""
from cStringIO import StringIO
import glob
import logging
import os
import pstats
import sys
from xml.etree import cElementTree as ET
import warnings
import Image
import numpy as np
from enable.api import Component
from enable.component_editor import ComponentEditor
from traits.api import (Any, Button, Dict, HasTraits, HTML, Instance,
List, Property, Str, on_trait_change)
from traitsui import api as tui
from enable.savage.svg import document
from enable.savage.trait_defs.ui.svg_editor import SVGEditor
from enable.savage.svg.backends.wx.renderer import Renderer as WxRenderer
from enable.savage.svg.backends.kiva.renderer import Renderer as KivaRenderer
from crosshair import Crosshair, MultiController
from profile_this import ProfileThis
from sike import Sike
from svg_component import ImageComponent, SVGComponent
from xml_view import xml_to_tree, xml_tree_editor
logger = logging.getLogger()
this_dir = os.path.abspath(os.path.dirname(__file__))
class ComponentTrait(Instance):
""" Convenience trait for Enable Components.
"""
def __init__(self, **kwds):
kwds.setdefault('klass', Component)
super(ComponentTrait, self).__init__(**kwds)
def create_editor(self):
return ComponentEditor()
def normalize_text(text):
""" Utility to normalize the whitespace in text.
This is used in order to prevent wx's HTML widget from overzealously trying
to interpret the whitespace as indicating preformatted text.
"""
return ' '.join(text.strip().split())
def activate_tool(component, tool):
""" Add and activate an overlay tool.
"""
component.tools.append(tool)
component.overlays.append(tool)
component.active_tool = tool
return tool
class Comparator(HasTraits):
""" The main application.
"""
#### Configuration traits ##################################################
# The root directory of the test suite.
suitedir = Str()
# Mapping of SVG basenames to their reference PNGs. Use None if there is no
# reference PNG.
svg_png = Dict()
# The list of SVG file names.
svg_files = List()
# The name of the default PNG file to display when no reference PNG exists.
default_png = Str(os.path.join(this_dir, 'images/default.png'))
#### State traits ##########################################################
# The currently selected SVG file.
current_file = Str()
abs_current_file = Property(depends_on=['current_file'])
# The current XML ElementTree root Element and its XMLTree view model.
current_xml = Any()
current_xml_view = Any()
# The profilers.
profile_this = Instance(ProfileThis, args=())
#### GUI traits ############################################################
# The text showing the current mouse coordinates over any of the components.
mouse_coords = Property(Str, depends_on=['ch_controller.svg_coords'])
# Move forward and backward through the list of SVG files.
move_forward = Button('>>')
move_backward = Button('<<')
# The description of the test.
description = HTML()
document = Instance(document.SVGDocument)
# The components to view.
kiva_component = ComponentTrait(klass=SVGComponent)
ref_component = ComponentTrait(klass=ImageComponent, args=())
ch_controller = Instance(MultiController)
# The profiler views.
parsing_sike = Instance(Sike, args=())
drawing_sike = Instance(Sike, args=())
wx_doc_sike = Instance(Sike, args=())
kiva_doc_sike= Instance(Sike, args=())
traits_view = tui.View(
tui.Tabbed(
tui.VGroup(
tui.HGroup(
tui.Item('current_file', editor=tui.EnumEditor(name='svg_files'),
style='simple', width=1.0, show_label=False),
tui.Item('move_backward', show_label=False,
enabled_when="svg_files.index(current_file) != 0"),
tui.Item('move_forward', show_label=False,
enabled_when="svg_files.index(current_file) != len(svg_files)-1"),
),
tui.VSplit(
tui.HSplit(
tui.Item('description', label='Description', show_label=False),
tui.Item('current_xml_view', editor=xml_tree_editor, show_label=False),
),
tui.HSplit(
tui.Item('document', editor=SVGEditor(), show_label=False),
tui.Item('kiva_component', show_label=False),
tui.Item('ref_component', show_label=False),
# TODO: tui.Item('agg_component', show_label=False),
),
),
label='SVG',
),
tui.Item('parsing_sike', style='custom', show_label=False,
label='Parsing Profile'),
tui.Item('drawing_sike', style='custom', show_label=False,
label='Kiva Drawing Profile'),
tui.Item('wx_doc_sike', style='custom', show_label=False,
label='Creating WX document'),
tui.Item('kiva_doc_sike', style='custom', show_label=False,
label='Creating Kiva document'),
),
width=1280,
height=768,
resizable=True,
statusbar='mouse_coords',
title='SVG Comparator',
)
def __init__(self, **traits):
super(Comparator, self).__init__(**traits)
kiva_ch = activate_tool(self.kiva_component, Crosshair(self.kiva_component))
ref_ch = activate_tool(self.ref_component, Crosshair(self.ref_component))
self.ch_controller = MultiController(kiva_ch, ref_ch)
@classmethod
def fromsuitedir(cls, dirname, **traits):
""" Find all SVG files and their related reference PNG files under
a directory.
This assumes that the SVGs are located under <dirname>/svg/ and the
related PNGs under <dirname>/png/ and that there are no subdirectories.
"""
dirname = os.path.abspath(dirname)
svgs = glob.glob(os.path.join(dirname, 'svg', '*.svg'))
pngdir = os.path.join(dirname, 'png')
d = {}
for svg in svgs:
png = None
base = os.path.splitext(os.path.basename(svg))[0]
for prefix in ('full-', 'basic-', 'tiny-', ''):
fn = os.path.join(pngdir, prefix+base+'.png')
if os.path.exists(fn):
png = os.path.basename(fn)
break
d[os.path.basename(svg)] = png
svgs = sorted(d)
x = cls(suitedir=dirname, svg_png=d, svg_files=svgs, **traits)
x.current_file = svgs[0]
return x
def display_reference_png(self, filename):
""" Read the image file and shove its data into the display component.
"""
img = Image.open(filename)
arr = np.array(img)
self.ref_component.image = arr
def display_test_description(self):
""" Extract the test description for display.
"""
html = ET.Element('html')
title = self.current_xml.find('.//{http://www.w3.org/2000/svg}title')
if title is not None:
title_text = title.text
else:
title_text = os.path.splitext(self.current_file)[0]
p = ET.SubElement(html, 'p')
b = ET.SubElement(p, 'b')
b.text = 'Title: '
b.tail = title_text
desc_text = None
version_text = None
desc = self.current_xml.find('.//{http://www.w3.org/2000/svg}desc')
if desc is not None:
desc_text = desc.text
else:
testcase = self.current_xml.find('.//{http://www.w3.org/2000/02/svg/testsuite/description/}SVGTestCase')
if testcase is not None:
desc_text = testcase.get('desc', None)
version_text = testcase.get('version', None)
if desc_text is not None:
p = ET.SubElement(html, 'p')
b = ET.SubElement(p, 'b')
b.text = 'Description: '
b.tail = normalize_text(desc_text)
if version_text is None:
script = self.current_xml.find('.//{http://www.w3.org/2000/02/svg/testsuite/description/}OperatorScript')
if script is not None:
version_text = script.get('version', None)
if version_text is not None:
p = ET.SubElement(html, 'p')
b = ET.SubElement(p, 'b')
b.text = 'Version: '
b.tail = version_text
paras = self.current_xml.findall('.//{http://www.w3.org/2000/02/svg/testsuite/description/}Paragraph')
if len(paras) > 0:
div = ET.SubElement(html, 'div')
for para in paras:
p = ET.SubElement(div, 'p')
p.text = normalize_text(para.text)
# Copy over any children elements like <a>.
p[:] = para[:]
tree = ET.ElementTree(html)
f = StringIO()
tree.write(f)
text = f.getvalue()
self.description = text
def locate_file(self, name, kind):
""" Find the location of the given file in the suite.
Parameters
----------
name : str
Path of the file relative to the suitedir.
kind : either 'svg' or 'png'
The kind of file.
Returns
-------
path : str
The full path to the file.
"""
return os.path.join(self.suitedir, kind, name)
def _kiva_component_default(self):
return SVGComponent(profile_this=self.profile_this)
def _move_backward_fired(self):
idx = self.svg_files.index(self.current_file)
idx = max(idx-1, 0)
self.current_file = self.svg_files[idx]
def _move_forward_fired(self):
idx = self.svg_files.index(self.current_file)
idx = min(idx+1, len(self.svg_files)-1)
self.current_file = self.svg_files[idx]
def _get_abs_current_file(self):
return self.locate_file(self.current_file, 'svg')
def _current_file_changed(self, new):
# Reset the warnings filters. While it's good to only get 1 warning per
# file, we want to get the same warning again if a new file issues it.
warnings.resetwarnings()
self.profile_this.start('Parsing')
self.current_xml = ET.parse(self.abs_current_file).getroot()
self.current_xml_view = xml_to_tree(self.current_xml)
resources = document.ResourceGetter.fromfilename(self.abs_current_file)
self.profile_this.stop()
try:
self.profile_this.start('Creating WX document')
self.document = document.SVGDocument(self.current_xml,
resources=resources,
renderer=WxRenderer)
except:
logger.exception('Error parsing document %s', new)
self.document = None
self.profile_this.stop()
try:
self.profile_this.start('Creating Kiva document')
self.kiva_component.document = document.SVGDocument(self.current_xml,
resources=resources,
renderer=KivaRenderer)
except Exception:
logger.exception('Error parsing document %s', new)
self.kiva_component.document = None
self.profile_this.stop()
png_file = self.svg_png.get(new, None)
if png_file is None:
png_file = self.default_png
else:
png_file = self.locate_file(png_file, 'png')
self.display_test_description()
self.display_reference_png(png_file)
def _get_mouse_coords(self):
if self.ch_controller is None:
return ''
else:
return '%1.3g %1.3g' % self.ch_controller.svg_coords
@on_trait_change('profile_this:profile_ended')
def _update_profiling(self, new):
if new is not None:
name, p = new
stats = pstats.Stats(p)
if name == 'Parsing':
self.parsing_sike.stats = stats
elif name == 'Drawing':
self.drawing_sike.stats = stats
elif name == 'Creating WX document':
self.wx_doc_sike.stats = stats
elif name == 'Creating Kiva document':
self.kiva_doc_sike.stats = stats
class OpenClipartComparator(Comparator):
""" Locate SVG files and PNGs in directories laid out like the OpenClipart
packages.
"""
@classmethod
def fromsuitedir(cls, dirname, **traits):
""" Load SVG and reference PNGs from an OpenClipart directory.
"""
dirname = os.path.abspath(dirname)
def remove_prefix(path, dirname=dirname):
if path.startswith(dirname + os.path.sep):
path = path[len(dirname)+1:]
return path
svg_png = {}
for d, dirs, files in os.walk(dirname):
for fn in files:
fn = os.path.join(d, fn)
base, ext = os.path.splitext(fn)
if ext == '.svg':
png = os.path.join(d, base+'.png')
if os.path.exists(png):
png = remove_prefix(png)
else:
png = None
svg = remove_prefix(fn)
svg_png[svg] = png
svgs = sorted(svg_png)
x = cls(suitedir=dirname, svg_png=svg_png, svg_files=svgs, **traits)
x.current_file = svgs[0]
return x
def locate_file(self, name, kind):
return os.path.join(self.suitedir, name)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--openclipart', action='store_true',
help="The suite is in OpenClipart layout rather than the SVG test suite layout.")
parser.add_argument('--suitedir', nargs='?',
default=os.path.join(this_dir, 'w3c_svg_11'),
help="The directory with the test suite. [default: %(default)s]")
args = parser.parse_args()
logging.basicConfig(stream=sys.stdout)
if args.openclipart:
klass = OpenClipartComparator
else:
klass = Comparator
if os.path.isfile(args.suitedir):
# We were given a single SVG file.
if args.openclipart:
suitedir, svg = os.path.split(args.suitedir)
else:
svgdir, svg = os.path.split(args.suitedir)
suitedir = os.path.split(svgdir)[0]
c = klass(suitedir=suitedir)
png = os.path.splitext(svg)[0] + '.png'
if not os.path.exists(c.locate_file(png, 'png')):
png = None
c.svg_png = {svg: png}
c.svg_files = [svg]
c.current_file = svg
else:
c = klass.fromsuitedir(args.suitedir)
c.configure_traits()
if __name__ == '__main__':
main()
|
|
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
#from beerex_exe import Beerex
from ..beerex_exe import Beerex
# load transposed qaqc data for inputs and expected outputs
# csv_transpose_path_in = "./beerex_qaqc_in_transpose.csv"
# pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
# print(pd_obj_inputs)
# csv_transpose_path_exp = "./beerex_qaqc_exp_transpose.csv"
# pd_obj_exp_out = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
# print(pd_obj_exp_out)
# create empty pandas dataframes to create empty beerex object
df_empty = pd.DataFrame()
beerex_empty = Beerex(df_empty, df_empty)
test = {}
class TestBeerex(unittest.TestCase):
"""
Unit tests for Beerex.
"""
def setup(self):
"""
Setup routine for beerex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open beerex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def teardown(self):
"""
Teardown routine for beerex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
# each of these functions are queued by "run_methods" and have
# outputs defined as properties in the beerex qaqc csv
def test_eec_spray(self):
"""
unittest for function beerex.eec_spray
"""
# self.out_eec_spray = (110 * self.application_rate) / 1000
try:
expected_results = [1.1, 0.2750]
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.application_rate = pd.Series([10., 2.5])
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
result = beerex_empty.out_eec_spray
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_eec_soil(self):
"""
unittest for function beerex.eec_soil
"""
# self.out_eec_soil = ((10**(0.95*self.log_kow-2.05)+0.82) *
# (-0.0648*(self.log_kow**2)+0.2431*self.log_kow+0.5822) *
# (1.5/(0.2+1.5*self.koc*0.01)) * (0.5 * self.application_rate)) / 1000
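# Worked check of the first expected value (log_kow=3, koc=10, rate=2):
#   10**(0.95*3 - 2.05) + 0.82    = 7.1296
#   -0.0648*9 + 0.2431*3 + 0.5822 = 0.7283
#   1.5 / (0.2 + 1.5*10*0.01)     = 4.2857
#   7.1296 * 0.7283 * 4.2857 * (0.5*2) / 1000 ~= 0.0222534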
try:
expected_results = [0.022253436, 0.0037878124]
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.log_kow = pd.Series([3., 2.2])
beerex_empty.koc = pd.Series([10., 1.3])
beerex_empty.application_rate = pd.Series([2., 0.72])
for i in range(len(expected_results)):
beerex_empty.eec_soil(i)
result = beerex_empty.out_eec_soil
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_eec_seed(self):
"""
unittest for function beerex.eec_seed
"""
# self.out_eec_seed = 1./1000. ??
# self.out_eec_seed = 1 * (self.lw4_pollen + self.lw4_nectar)/1000
try:
expected_results = [0.0010, 0.001]
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.lw4_pollen = pd.Series([4., 0.3])
beerex_empty.lw4_nectar = pd.Series([120., 2.8])
for i in range(len(expected_results)):
beerex_empty.eec_seed(i)
result = beerex_empty.out_eec_seed
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_eec_tree(self):
"""
unittest for function beerex.eec_tree
"""
# self.out_eec_tree = (self.application_rate/self.mass_tree_vegetation) / 1000
try:
expected_results = [0.0100, 0.00011157]
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.application_rate = pd.Series([5., 2.7])
beerex_empty.mass_tree_vegetation = pd.Series([0.5, 24.2])
for i in range(len(expected_results)):
beerex_empty.eec_tree(i)
results = beerex_empty.out_eec_tree
npt.assert_array_almost_equal(results, expected_results, 4, '', True)
finally:
tab = [results, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# def test_beerex_eec_method(self):
# """
# unittest for function beerex.eec_method
# """
# # if self.application_method == "foliar spray":
# # self.out_eec_method = self.out_eec_spray
# # elif self.application_method == "soil application":
# # self.out_eec_method = self.out_eec_soil
# # elif self.application_method == "seed treatment":
# # self.out_eec_method = self.out_eec_seed
# # elif self.application_method == "tree trunk":
# # self.out_eec_method = self.out_eec_tree
# # return self.out_eec_method
#
# # self.out_eec_soil = ((10**(0.95*self.log_kow-2.05)+0.82) *
# # (-0.0648*(self.log_kow**2)+0.2431*self.log_kow+0.5822) *
# # (1.5/(0.2+1.5*self.koc*0.01)) * (0.5 * self.application_rate)) / 1000
# try:
# expected_results = [0.0010, 0.0031858]
# beerex_empty.application_method = pd.Series(['seed treatment', 'soil application'])
# beerex_empty.koc = pd.Series([0.76, 0.76])
# beerex_empty.log_kow = pd.Series([1.6, 1.6])
# beerex_empty.application_rate = pd.Series([1., 1.])
# beerex_empty.out_eec_method = pd.Series([3., 3.])
# beerex_empty.eec_method()
# result = beerex_empty.out_eec_method
# npt.assert_array_almost_equal(result, expected_results, 4, '', True)
# finally:
# tab = [result, expected_results]
# print("\n")
# print(inspect.currentframe().f_code.co_name)
# print(tabulate(str(tab), headers='keys', tablefmt='rst'))
# return
def test_beerex_lw1_total_dose(self):
"""
unittest for function beerex.lw1_total_dose
"""
# self.out_lw1_total_dose = (self.out_eec_method/100.) * self.lw1_jelly
try:
expected_results = [0.0550, 0.006765]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lw1_jelly = pd.Series([12.5, 8.2])
beerex_empty.application_rate = pd.Series([4., 0.75])
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lw1_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.out_eec = beerex_empty.out_eec_spray
beerex_empty.lw1_total_dose()
result = beerex_empty.out_lw1_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw2_total_dose(self):
"""
unittest for function beerex.lw2_total_dose
"""
# self.out_lw2_total_dose = (self.out_eec_method/100.) * self.lw2_jelly
try:
expected_results = [0.000001784, 0.000007386]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lw2_jelly = pd.Series([0.223, 2.5])
beerex_empty.application_rate = pd.Series([10., 6.5])
beerex_empty.mass_tree_vegetation = pd.Series([12.5, 22.])
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lw2_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_tree(i)
beerex_empty.out_eec = beerex_empty.out_eec_tree
beerex_empty.lw2_total_dose()
result = beerex_empty.out_lw2_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw3_total_dose(self):
"""
unittest for function beerex.lw3_total_dose
"""
# self.out_lw3_total_dose = (self.out_eec_method/100.) * self.lw3_jelly
# self.out_eec_soil = ((10**(0.95*self.log_kow-2.05)+0.82) *
# (-0.0648*(self.log_kow**2)+0.2431*self.log_kow+0.5822) *
# (1.5/(0.2+1.5*self.koc*0.01)) * (0.5 * self.application_rate)) / 1000
try:
expected_results = [0.00058199, 0.000220323]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lw3_jelly = pd.Series([15.2, 3.2])
beerex_empty.application_rate = pd.Series([1.6, 2.1])
beerex_empty.log_kow = pd.Series([2., 2.1])
beerex_empty.koc = pd.Series([12.5, 7.6])
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lw3_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_soil(i)
beerex_empty.out_eec = beerex_empty.out_eec_soil
beerex_empty.lw3_total_dose()
result = beerex_empty.out_lw3_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw4_total_dose(self):
"""
unittest for function beerex.lw4_total_dose
"""
#if self.empirical_residue[0] == True:
# self.out_lw4_total_dose = ((self.empirical_pollen/1000.) * self.lw4_pollen) + ((self.empirical_nectar/1000.) * self.lw4_nectar)
#else:
# self.out_lw4_total_dose = (self.out_eec_method * self.lw4_pollen) + (self.out_eec_method * self.lw4_nectar)
# self.out_eec_spray = (110 * self.application_rate) / 1000
try:
expected_results = [0.04387, 0.011578]
beerex_empty.empirical_residue = pd.Series(["yes", "yes"])
beerex_empty.empirical_pollen = pd.Series([3.7, 4.3])
beerex_empty.empirical_nectar = pd.Series([5.4, 1.2])
beerex_empty.lw4_pollen = pd.Series([8.5, 2.5])
beerex_empty.lw4_nectar = pd.Series([2.3, 0.69])
beerex_empty.application_rate = pd.Series([0.75, 0.48])
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lw4_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.out_eec = beerex_empty.out_eec_spray
beerex_empty.lw4_total_dose()
result = beerex_empty.out_lw4_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
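    # Worked check of the empirical-residue branch quoted above (arithmetic only):
    # (3.7/1000.)*8.5 + (5.4/1000.)*2.3 = 0.03145 + 0.01242 = 0.04387, matching
    # expected_results[0].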
def test_beerex_lw5_total_dose(self):
"""
unittest for function beerex.lw5_total_dose
"""
#if self.empirical_residue[0] == True:
# self.out_lw5_total_dose = ((self.empirical_pollen/1000.) * self.lw5_pollen) + ((self.empirical_nectar/1000.) * self.lw5_nectar)
#else:
# self.out_lw5_total_dose = (self.out_eec_method * self.lw5_pollen) + (self.out_eec_method * self.lw5_nectar)
try:
expected_results = [0.032169, 0.07042]
beerex_empty.empirical_residue = pd.Series(["yes", "yes"])
beerex_empty.empirical_pollen = pd.Series([7.2, 12.2])
beerex_empty.empirical_nectar = pd.Series([2.1, 1.6])
beerex_empty.lw5_pollen = pd.Series([2.75, 5.3])
beerex_empty.lw5_nectar = pd.Series([5.89, 3.6])
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lw5_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_seed(i)
beerex_empty.out_eec = beerex_empty.out_eec_seed
beerex_empty.lw5_total_dose()
result = beerex_empty.out_lw5_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ld6_total_dose(self):
"""
unittest for function beerex.ld6_total_dose
"""
#if self.empirical_residue[0] == True:
# self.out_ld6_total_dose = ((self.empirical_pollen/1000.) * self.ld6_pollen) + ((self.empirical_nectar/1000.) * self.ld6_nectar)
#else:
# self.out_ld6_total_dose = (self.out_eec_method * self.ld6_pollen) + (self.out_eec_method * self.ld6_nectar)
try:
expected_results = [0.03708036, 0.0437]
beerex_empty.empirical_residue = pd.Series(["yes", "yes"])
beerex_empty.empirical_pollen = pd.Series([7.3, 4.5])
beerex_empty.empirical_nectar = pd.Series([2.5, 11.])
beerex_empty.ld6_pollen = pd.Series([0.8432, 3.6])
beerex_empty.ld6_nectar = pd.Series([12.37, 2.5])
beerex_empty.application_rate = pd.Series([5., 4.2])
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_ld6_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.out_eec = beerex_empty.out_eec_spray
beerex_empty.ld6_total_dose()
result = beerex_empty.out_ld6_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq1_total_dose(self):
"""
unittest for function beerex.lq1_total_dose
"""
# self.out_lq1_total_dose = (self.out_eec_method/100.) * self.lq1_jelly
try:
expected_results = [0.00000550588235, 0.00000177455]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lq1_jelly = pd.Series([1.35, 5.3])
beerex_empty.application_rate = pd.Series([5.2, 0.75])
beerex_empty.mass_tree_vegetation = pd.Series([12.75, 22.4])
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lq1_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_tree(i)
beerex_empty.out_eec = beerex_empty.out_eec_tree
beerex_empty.lq1_total_dose()
result = beerex_empty.out_lq1_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq2_total_dose(self):
"""
unittest for function beerex.lq2_total_dose
"""
# self.out_lq2_total_dose = (self.out_eec_method/100.) * self.lq2_jelly
try:
expected_results = [0.000065, 0.000028]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lq2_jelly = pd.Series([6.5, 2.8])
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lq2_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_seed(i)
beerex_empty.out_eec = beerex_empty.out_eec_seed
beerex_empty.lq2_total_dose()
result = beerex_empty.out_lq2_total_dose
            npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq3_total_dose(self):
"""
unittest for function beerex.lq3_total_dose
"""
# self.out_lq3_total_dose = (self.out_eec_method/100.) * self.lq3_jelly
try:
expected_results = [0.055055, 0.0132]
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.lq3_jelly = pd.Series([14.3, 7.5])
beerex_empty.application_rate = pd.Series([3.5, 1.6])
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lq3_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.out_eec = beerex_empty.out_eec_spray
beerex_empty.lq3_total_dose()
result = beerex_empty.out_lq3_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq4_total_dose(self):
"""
unittest for function beerex.lq4_total_dose
"""
#if self.empirical_residue[0] == True:
# self.out_lq4_total_dose = (self.empirical_jelly/1000.) * self.lq4_jelly
#else:
# self.out_lq4_total_dose = (self.out_eec_method/100.) * self.lq4_jelly
try:
expected_results = [0.15136, 0.48521]
beerex_empty.empirical_residue = pd.Series(["yes", "yes"])
beerex_empty.empirical_jelly = pd.Series([6.4, 12.1])
beerex_empty.lq4_jelly = pd.Series([23.65, 40.1])
beerex_empty.application_rate = pd.Series([1.45, 2.3])
beerex_empty.log_kow = pd.Series([5., 3.1])
beerex_empty.koc = pd.Series([24.1, 12.4])
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_lq4_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_soil(i)
beerex_empty.out_eec = beerex_empty.out_eec_soil
beerex_empty.lq4_total_dose()
result = beerex_empty.out_lq4_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_cell_total_dose(self):
"""
unittest for function beerex.aw_cell_total_dose
"""
# self.out_aw_cell_total_dose = (self.out_eec_method * self.aw_cell_nectar) + (self.out_eec_method * self.aw_cell_pollen)
try:
expected_results = [0.0037, 0.0176]
beerex_empty.aw_cell_pollen = pd.Series([2.4, 12.3])
beerex_empty.aw_cell_nectar = pd.Series([1.3, 5.3])
            beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.out_aw_cell_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_seed(i)
beerex_empty.out_eec = beerex_empty.out_eec_seed
beerex_empty.aw_cell_total_dose()
result = beerex_empty.out_aw_cell_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
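    # Worked check (back-solved from the expected values, not taken from the beerex
    # source): eec_seed appears to evaluate to a constant 0.001 here, so the dose is
    # 0.001 * (2.4 + 1.3) = 0.0037 and 0.001 * (12.3 + 5.3) = 0.0176, matching
    # expected_results.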
def test_beerex_aw_brood_total_dose(self):
"""
unittest for function beerex.aw_brood_total_dose
"""
# self.out_aw_brood_total_dose = (self.out_eec_method * self.aw_brood_nectar) + (self.out_eec_method * self.aw_brood_pollen)
try:
expected_results = [23.0725, 6.5472]
beerex_empty.out_aw_brood_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.aw_brood_pollen = pd.Series([5.7, 6.5])
beerex_empty.aw_brood_nectar = pd.Series([78.2, 43.1])
beerex_empty.application_rate = pd.Series([2.5, 1.2])
beerex_empty.n_runs = len(expected_results)
            beerex_empty.empirical_residue = pd.Series(["no", "no"])
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.out_eec = beerex_empty.out_eec_spray
beerex_empty.aw_brood_total_dose()
result = beerex_empty.out_aw_brood_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_comb_total_dose(self):
"""
unittest for function beerex.aw_comb_total_dose
"""
# self.out_aw_comb_total_dose = (self.out_eec_method * self.aw_comb_nectar) + (self.out_eec_method * self.aw_comb_pollen)
# self.out_eec_soil = ((10**(0.95*self.log_kow-2.05)+0.82) *
# (-0.0648*(self.log_kow**2)+0.2431*self.log_kow+0.5822) *
# (1.5/(0.2+1.5*self.koc*0.01)) * (0.5 * self.application_rate)) / 1000
try:
expected_results = [1.2011859, 0.118363]
beerex_empty.aw_comb_pollen = pd.Series([6.2, 35.1])
beerex_empty.aw_comb_nectar = pd.Series([25.9, 12.4])
beerex_empty.log_kow = pd.Series([3.4, 0.63])
beerex_empty.koc = pd.Series([16.2, 2.5])
beerex_empty.application_rate = pd.Series([2.1, 1.3])
            beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.out_aw_comb_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_soil(i)
beerex_empty.out_eec = beerex_empty.out_eec_soil
beerex_empty.aw_comb_total_dose()
result = beerex_empty.out_aw_comb_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_pollen_total_dose(self):
"""
unittest for function beerex.aw_pollen_total_dose
"""
# self.out_aw_pollen_total_dose = (self.out_eec_method * self.aw_fpollen_nectar) + (self.out_eec_method * self.aw_fpollen_pollen)
try:
expected_results = [0.015225, 0.000689367]
beerex_empty.aw_fpollen_pollen = pd.Series([6.7, 12.4])
beerex_empty.aw_fpollen_nectar = pd.Series([54.2, 26.5])
beerex_empty.application_rate = pd.Series([4.2, 0.98])
beerex_empty.mass_tree_vegetation = pd.Series([16.8, 55.3])
beerex_empty.n_runs = len(expected_results)
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_aw_pollen_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
for i in range(len(expected_results)):
beerex_empty.eec_tree(i)
beerex_empty.out_eec = beerex_empty.out_eec_tree
beerex_empty.aw_pollen_total_dose()
result = beerex_empty.out_aw_pollen_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_nectar_total_dose(self):
"""
unittest for function beerex.aw_nectar_total_dose
"""
# self.out_aw_nectar_total_dose = (self.out_eec_method * self.aw_fnectar_nectar) + (self.out_eec_method * self.aw_fnectar_pollen)
try:
expected_results = [0.0273, 0.0296]
beerex_empty.aw_fnectar_pollen = pd.Series([3.8, 16.9])
beerex_empty.aw_fnectar_nectar = pd.Series([23.5, 12.7])
beerex_empty.out_aw_nectar_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
            beerex_empty.empirical_residue = pd.Series(["no", "no"])
for i in range(len(expected_results)):
beerex_empty.eec_seed(i)
beerex_empty.out_eec = beerex_empty.out_eec_seed
beerex_empty.aw_nectar_total_dose()
result = beerex_empty.out_aw_nectar_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_winter_total_dose(self):
"""
unittest for function beerex.aw_winter_total_dose
"""
# self.out_aw_winter_total_dose = (self.out_eec_method * self.aw_winter_nectar) + (self.out_eec_method * self.aw_winter_pollen)
try:
expected_results = [0.013073320537, 0.0016036363]
beerex_empty.aw_winter_pollen = pd.Series([8.2, 6.9])
beerex_empty.aw_winter_nectar = pd.Series([86.4, 22.5])
beerex_empty.application_rate = pd.Series([7.2, 1.5])
beerex_empty.mass_tree_vegetation = pd.Series([52.1, 27.5])
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_aw_winter_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
beerex_empty.empirical_residue = pd.Series(["no", "no"])
for i in range(len(expected_results)):
beerex_empty.eec_tree(i)
beerex_empty.out_eec = beerex_empty.out_eec_tree
beerex_empty.aw_winter_total_dose()
result = beerex_empty.out_aw_winter_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ad_total_dose(self):
"""
unittest for function beerex.ad_total_dose
"""
#if self.empirical_residue[0] == True:
# self.out_ad_total_dose = ((self.empirical_nectar/1000.) * self.ad_nectar) + ((self.empirical_pollen/1000.) * self.ad_pollen)
#else:
# self.out_ad_total_dose = (self.out_eec_method * self.ad_nectar) + (self.out_eec_method * self.ad_pollen)
try:
expected_results = [0.02904, 0.10844]
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_ad_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.empirical_residue = pd.Series(["yes", "yes"])
beerex_empty.ad_pollen = pd.Series([2.4, 36.5])
beerex_empty.ad_nectar = pd.Series([22.8, 2.1])
beerex_empty.application_rate = pd.Series([8.9, 0.88])
beerex_empty.empirical_nectar = pd.Series([1.2, 13.4])
beerex_empty.empirical_pollen = pd.Series([0.7, 2.2])
beerex_empty.n_runs = len(expected_results)
for i in range(len(expected_results)):
beerex_empty.eec_spray(i)
beerex_empty.ad_total_dose()
result = beerex_empty.out_ad_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
        finally:
            tab = [result, expected_results]
            print("\n")
            print(inspect.currentframe().f_code.co_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
return
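    # Worked check of the empirical-residue branch quoted above (arithmetic only):
    # (1.2/1000.)*22.8 + (0.7/1000.)*2.4 = 0.02736 + 0.00168 = 0.02904, matching
    # expected_results[0].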
def test_beerex_aq_total_dose(self):
"""
unittest for function beerex.aq_total_dose
"""
# self.out_aq_total_dose = (self.out_eec_method/100.) * self.aq_jelly
# self.out_eec_soil = ((10**(0.95*self.log_kow-2.05)+0.82) *
# (-0.0648*(self.log_kow**2)+0.2431*self.log_kow+0.5822) *
# (1.5/(0.2+1.5*self.koc*0.01)) * (0.5 * self.application_rate)) / 1000
try:
expected_results = [-85.7931737, 0.021943272]
beerex_empty.out_eec_soil = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_spray = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_seed = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec_tree = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_eec = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.out_aq_total_dose = pd.Series(np.nan, index=range(len(expected_results)), dtype="float")
beerex_empty.n_runs = len(expected_results)
beerex_empty.empirical_residue = pd.Series(["no", "no"])
beerex_empty.aq_jelly = pd.Series([223., 68.7])
beerex_empty.log_kow = pd.Series([6.3, 2.4])
beerex_empty.koc = pd.Series([4.1, 25.4])
beerex_empty.application_rate = pd.Series([3.4, 12.4])
for i in range(len(expected_results)):
print(i)
beerex_empty.eec_soil(i)
beerex_empty.out_eec = beerex_empty.out_eec_soil
beerex_empty.aq_total_dose()
result = beerex_empty.out_aq_total_dose
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw1_acute_rq(self):
"""
unittest for function beerex.lw1_acute_rq
"""
# self.out_lw1_acute_rq = self.out_lw1_total_dose/self.larval_ld50
try:
expected_results = [5.259259, 0.34929577]
beerex_empty.out_lw1_total_dose = pd.Series([14.2, 12.4])
beerex_empty.larval_ld50 = pd.Series([2.7, 35.5])
result = beerex_empty.lw1_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
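    # The acute RQ tests in this block all exercise the same simple ratio,
    # total dose / LD50; e.g. 14.2 / 2.7 = 5.259259 and 12.4 / 35.5 = 0.349296
    # reproduce expected_results above (arithmetic only, not from the beerex source).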
def test_beerex_lw2_acute_rq(self):
"""
unittest for function beerex.lw2_acute_rq
"""
# self.out_lw2_acute_rq = self.out_lw2_total_dose/self.larval_ld50
try:
expected_results = [0.35570469, 2.13461538]
beerex_empty.out_lw2_total_dose = pd.Series([5.3, 22.2])
beerex_empty.larval_ld50 = pd.Series([14.9, 10.4])
result = beerex_empty.lw2_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw3_acute_rq(self):
"""
unittest for function beerex.lw3_acute_rq
"""
# self.out_lw3_acute_rq = self.out_lw3_total_dose/self.larval_ld50
try:
expected_results = [3.373563, 0.20754717]
beerex_empty.out_lw3_total_dose = pd.Series([58.7, 1.1])
beerex_empty.larval_ld50 = pd.Series([17.4, 5.3])
result = beerex_empty.lw3_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw4_acute_rq(self):
"""
unittest for function beerex.lw4_acute_rq
"""
# self.out_lw4_acute_rq = self.out_lw4_total_dose/self.larval_ld50
try:
expected_results = [3.782608, 1.57142857]
beerex_empty.out_lw4_total_dose = pd.Series([8.7, 2.2])
beerex_empty.larval_ld50 = pd.Series([2.3, 1.4])
result = beerex_empty.lw4_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw5_acute_rq(self):
"""
unittest for function beerex.lw5_acute_rq
"""
# self.out_lw5_acute_rq = self.out_lw5_total_dose/self.larval_ld50
try:
expected_results = [20.08333, 2.672727273]
beerex_empty.out_lw5_total_dose = pd.Series([24.1, 14.7])
beerex_empty.larval_ld50 = pd.Series([1.2, 5.5])
result = beerex_empty.lw5_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ld6_acute_rq(self):
"""
unittest for function beerex.ld6_acute_rq
"""
# self.out_ld6_acute_rq = self.out_ld6_total_dose/self.larval_ld50
try:
expected_results = [0.782258, 5.82142857]
beerex_empty.out_ld6_total_dose = pd.Series([9.7, 16.3])
beerex_empty.larval_ld50 = pd.Series([12.4, 2.8])
result = beerex_empty.ld6_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq1_acute_rq(self):
"""
unittest for function beerex.lq1_acute_rq
"""
# self.out_lq1_acute_rq = self.out_lq1_total_dose/self.larval_ld50
try:
expected_results = [1.7420, 0.2068966]
beerex_empty.out_lq1_total_dose = pd.Series([174.2, 3.6])
beerex_empty.larval_ld50 = pd.Series([100., 17.4])
result = beerex_empty.lq1_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq2_acute_rq(self):
"""
unittest for function beerex.lq2_acute_rq
"""
# self.out_lq2_acute_rq = self.out_lq2_total_dose/self.larval_ld50
try:
expected_results = [1.77011, 0.5789474]
beerex_empty.out_lq2_total_dose = pd.Series([15.4, 2.2])
beerex_empty.larval_ld50 = pd.Series([8.7, 3.8])
result = beerex_empty.lq2_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq3_acute_rq(self):
"""
unittest for function beerex.lq3_acute_rq
"""
# self.out_lq3_acute_rq = self.out_lq3_total_dose/self.larval_ld50
try:
expected_results = [7.659793, 1.6623377]
beerex_empty.out_lq3_total_dose = pd.Series([74.3, 25.6])
beerex_empty.larval_ld50 = pd.Series([9.7, 15.4])
result = beerex_empty.lq3_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq4_acute_rq(self):
"""
unittest for function beerex.lq4_acute_rq
"""
# self.out_lq4_acute_rq = self.out_lq4_total_dose/self.larval_ld50
try:
expected_results = [0.3620689, 9.782608696]
beerex_empty.out_lq4_total_dose = pd.Series([6.3, 22.5])
beerex_empty.larval_ld50 = pd.Series([17.4, 2.3])
result = beerex_empty.lq4_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_cell_acute_rq(self):
"""
unittest for function beerex.aw_cell_acute_rq
"""
        # self.out_aw_cell_acute_rq = self.out_aw_cell_total_dose/self.adult_oral_ld50
try:
expected_results = [6.154929, 2.48]
beerex_empty.out_aw_cell_total_dose = pd.Series([87.4, 18.6])
beerex_empty.adult_oral_ld50 = pd.Series([14.2, 7.5])
result = beerex_empty.aw_cell_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_brood_acute_rq(self):
"""
unittest for function beerex.aw_brood_acute_rq
"""
        # self.out_aw_brood_acute_rq = self.out_aw_brood_total_dose/self.adult_oral_ld50
try:
expected_results = [10.68181, 1.823529412]
beerex_empty.out_aw_brood_total_dose = pd.Series([23.5, 12.4])
beerex_empty.adult_oral_ld50 = pd.Series([2.2, 6.8])
result = beerex_empty.aw_brood_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_comb_acute_rq(self):
"""
unittest for function beerex.aw_comb_acute_rq
"""
        # self.out_aw_comb_acute_rq = self.out_aw_comb_total_dose/self.adult_oral_ld50
try:
expected_results = [1.95031, 1.2378378]
beerex_empty.out_aw_comb_total_dose = pd.Series([62.8, 22.9])
beerex_empty.adult_oral_ld50 = pd.Series([32.2, 18.5])
result = beerex_empty.aw_comb_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_pollen_acute_rq(self):
"""
unittest for function beerex.aw_pollen_acute_rq
"""
        # self.out_aw_pollen_acute_rq = self.out_aw_pollen_total_dose/self.adult_oral_ld50
try:
expected_results = [0.884615, 2.6377953]
beerex_empty.out_aw_pollen_total_dose = pd.Series([6.9, 33.5])
beerex_empty.adult_oral_ld50 = pd.Series([7.8, 12.7])
result = beerex_empty.aw_pollen_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_nectar_acute_rq(self):
"""
unittest for function beerex.aw_nectar_acute_rq
"""
        # self.out_aw_nectar_acute_rq = self.out_aw_nectar_total_dose/self.adult_oral_ld50
try:
expected_results = [4.83333, 0.187919463]
beerex_empty.out_aw_nectar_total_dose = pd.Series([124.7, 2.8])
beerex_empty.adult_oral_ld50 = pd.Series([25.8, 14.9])
result = beerex_empty.aw_nectar_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_winter_acute_rq(self):
"""
unittest for function beerex.aw_winter_acute_rq
"""
        # self.out_aw_winter_acute_rq = self.out_aw_winter_total_dose/self.adult_oral_ld50
try:
expected_results = [0.079411, 1.1414141414]
beerex_empty.out_aw_winter_total_dose = pd.Series([0.54, 22.6])
beerex_empty.adult_oral_ld50 = pd.Series([6.8, 19.8])
result = beerex_empty.aw_winter_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ad_acute_rq(self):
"""
unittest for function beerex.ad_acute_rq
"""
# self.out_ad_acute_rq = self.out_ad_total_dose/self.adult_oral_ld50
try:
expected_results = [4.438596, 0.1464968]
beerex_empty.out_ad_total_dose = pd.Series([25.3, 2.3])
beerex_empty.adult_oral_ld50 = pd.Series([5.7, 15.7])
result = beerex_empty.ad_acute_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aq_acute_rq(self):
"""
unittest for function beerex.aq_acute_rq
"""
# self.out_aq_acute_rq = self.out_aq_total_dose/self.adult_oral_ld50
try:
expected_results = [5.266129, 0.01744186]
beerex_empty.out_aq_total_dose = pd.Series([65.3, 0.15])
beerex_empty.adult_oral_ld50 = pd.Series([12.4, 8.6])
result = beerex_empty.aq_acute_rq()
            npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw1_chronic_rq(self):
"""
unittest for function beerex.lw1_chronic_rq
"""
# self.out_lw1_chronic_rq = self.out_lw1_total_dose/self.larval_noael
try:
expected_results = [3.22222, 0.220125786]
beerex_empty.out_lw1_total_dose = pd.Series([23.2, 3.5])
beerex_empty.larval_noael = pd.Series([7.2, 15.9])
result = beerex_empty.lw1_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw2_chronic_rq(self):
"""
unittest for function beerex.lw2_chronic_rq
"""
# self.out_lw2_chronic_rq = self.out_lw2_total_dose/self.larval_noael
try:
expected_results = [11.369565, 2.76712329]
beerex_empty.out_lw2_total_dose = pd.Series([52.3, 20.2])
beerex_empty.larval_noael = pd.Series([4.6, 7.3])
result = beerex_empty.lw2_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw3_chronic_rq(self):
"""
unittest for function beerex.lw3_chronic_rq
"""
# self.out_lw3_chronic_rq = self.out_lw3_total_dose/self.larval_noael
try:
expected_results = [2.47826, 1.03164557]
beerex_empty.out_lw3_total_dose = pd.Series([5.7, 16.3])
beerex_empty.larval_noael = pd.Series([2.3, 15.8])
result = beerex_empty.lw3_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw4_chronic_rq(self):
"""
unittest for function beerex.lw4_chronic_rq
"""
# self.out_lw4_chronic_rq = self.out_lw4_total_dose/self.larval_noael
try:
expected_results = [1.237069, 32.625]
beerex_empty.out_lw4_total_dose = pd.Series([28.7, 78.3])
beerex_empty.larval_noael = pd.Series([23.2, 2.4])
result = beerex_empty.lw4_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lw5_chronic_rq(self):
"""
unittest for function beerex.lw5_chronic_rq
"""
# self.out_lw5_chronic_rq = self.out_lw5_total_dose/self.larval_noael
try:
expected_results = [0.1750, 0.01688889]
beerex_empty.out_lw5_total_dose = pd.Series([2.1, 0.38])
beerex_empty.larval_noael = pd.Series([12., 22.5])
result = beerex_empty.lw5_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ld6_chronic_rq(self):
"""
unittest for function beerex.ld6_chronic_rq
"""
# self.out_ld6_chronic_rq = self.out_ld6_total_dose/self.larval_noael
try:
expected_results = [1.3406432, 0.28125]
beerex_empty.out_ld6_total_dose = pd.Series([91.7, 3.6])
beerex_empty.larval_noael = pd.Series([68.4, 12.8])
result = beerex_empty.ld6_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq1_chronic_rq(self):
"""
unittest for function beerex.lq1_chronic_rq
"""
# self.out_lq1_chronic_rq = self.out_lq1_total_dose/self.larval_noael
try:
expected_results = [0.078947, 272.0833333]
beerex_empty.out_lq1_total_dose = pd.Series([1.2, 65.3])
beerex_empty.larval_noael = pd.Series([15.2, 0.24])
result = beerex_empty.lq1_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq2_chronic_rq(self):
"""
unittest for function beerex.lq2_chronic_rq
"""
# self.out_lq2_chronic_rq = self.out_lq2_total_dose/self.larval_noael
try:
expected_results = [0.0588235, 0.01617647]
beerex_empty.out_lq2_total_dose = pd.Series([4.2, 0.11])
beerex_empty.larval_noael = pd.Series([71.4, 6.8])
result = beerex_empty.lq2_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq3_chronic_rq(self):
"""
unittest for function beerex.lq3_chronic_rq
"""
# self.out_lq3_chronic_rq = self.out_lq3_total_dose/self.larval_noael
try:
expected_results = [0.503448, 0.44072165]
beerex_empty.out_lq3_total_dose = pd.Series([7.3, 34.2])
beerex_empty.larval_noael = pd.Series([14.5, 77.6])
result = beerex_empty.lq3_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_lq4_chronic_rq(self):
"""
unittest for function beerex.lq4_chronic_rq
"""
# self.out_lq4_chronic_rq = self.out_lq4_total_dose/self.larval_noael
try:
expected_results = [0.169863, 0.19411765]
beerex_empty.out_lq4_total_dose = pd.Series([6.2, 0.33])
beerex_empty.larval_noael = pd.Series([36.5, 1.7])
result = beerex_empty.lq4_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_cell_chronic_rq(self):
"""
unittest for function beerex.aw_cell_chronic_rq
"""
        # self.out_aw_cell_chronic_rq = self.out_aw_cell_total_dose/self.adult_oral_noael
try:
expected_results = [3.652174, 5.1206897]
beerex_empty.out_aw_cell_total_dose = pd.Series([8.4, 29.7])
beerex_empty.adult_oral_noael = pd.Series([2.3, 5.8])
result = beerex_empty.aw_cell_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_brood_chronic_rq(self):
"""
unittest for function beerex.aw_brood_chronic_rq
"""
        # self.out_aw_brood_chronic_rq = self.out_aw_brood_total_dose/self.adult_oral_noael
try:
expected_results = [0.22727, 2.8023256]
beerex_empty.out_aw_brood_total_dose = pd.Series([0.5, 24.1])
beerex_empty.adult_oral_noael = pd.Series([2.2, 8.6])
result = beerex_empty.aw_brood_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_comb_chronic_rq(self):
"""
unittest for function beerex.aw_comb_chronic_rq
"""
        # self.out_aw_comb_chronic_rq = self.out_aw_comb_total_dose/self.adult_oral_noael
try:
expected_results = [4.00000, 0.74712644]
beerex_empty.out_aw_comb_total_dose = pd.Series([12.8, 0.65])
beerex_empty.adult_oral_noael = pd.Series([3.2, 0.87])
result = beerex_empty.aw_comb_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_pollen_chronic_rq(self):
"""
unittest for function beerex.aw_pollen_chronic_rq
"""
        # self.out_aw_pollen_chronic_rq = self.out_aw_pollen_total_dose/self.adult_oral_noael
try:
expected_results = [1.724489, 4.1627907]
beerex_empty.out_aw_pollen_total_dose = pd.Series([16.9, 17.9])
beerex_empty.adult_oral_noael = pd.Series([9.8, 4.3])
result = beerex_empty.aw_pollen_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_nectar_chronic_rq(self):
"""
unittest for function beerex.aw_nectar_chronic_rq
"""
        # self.out_aw_nectar_chronic_rq = self.out_aw_nectar_total_dose/self.adult_oral_noael
try:
expected_results = [0.1821705, 0.05147059]
beerex_empty.out_aw_nectar_total_dose = pd.Series([4.7, 0.35])
beerex_empty.adult_oral_noael = pd.Series([25.8, 6.8])
result = beerex_empty.aw_nectar_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aw_winter_chronic_rq(self):
"""
unittest for function beerex.aw_winter_chronic_rq
"""
        # self.out_aw_winter_chronic_rq = self.out_aw_winter_total_dose/self.adult_oral_noael
try:
expected_results = [7.941176, 7.5080645]
beerex_empty.out_aw_winter_total_dose = pd.Series([54., 93.1])
beerex_empty.adult_oral_noael = pd.Series([6.8, 12.4])
result = beerex_empty.aw_winter_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_ad_chronic_rq(self):
"""
        unittest for function beerex.ad_chronic_rq
"""
# self.out_ad_chronic_rq = self.out_ad_total_dose/self.adult_oral_noael
try:
expected_results = [0.44094488, 0.41176471]
beerex_empty.out_ad_total_dose = pd.Series([5.6, 1.4])
beerex_empty.adult_oral_noael = pd.Series([12.7, 3.4])
result = beerex_empty.ad_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_beerex_aq_chronic_rq(self):
"""
unittest for function beerex.aq_chronic_rq
"""
# self.out_aq_chronic_rq = self.out_aq_total_dose/self.adult_oral_noael
try:
expected_results = [0.226496, 29.0000]
beerex_empty.out_aq_total_dose = pd.Series([5.3, 11.6])
beerex_empty.adult_oral_noael = pd.Series([23.4, 0.4])
result = beerex_empty.aq_chronic_rq()
npt.assert_array_almost_equal(result, expected_results, 4, '', True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
# ---------------------------------------------------------------------------
import itertools
from time import time
import Errors
import DebugFlags
import Options
from Visitor import CythonTransform
from Errors import CompileError, InternalError, AbortError
#
# Really small pipeline stages
#
def dumptree(t):
# For quick debugging in pipelines
print t.dump()
return t
def abort_on_errors(node):
# Stop the pipeline if there are any errors.
if Errors.num_errors != 0:
raise AbortError, "pipeline break"
return node
def parse_stage_factory(context):
def parse(compsrc):
source_desc = compsrc.source_desc
full_module_name = compsrc.full_module_name
initial_pos = (source_desc, 1, 0)
saved_cimport_from_pyx, Options.cimport_from_pyx = Options.cimport_from_pyx, False
scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0,
check_module_name = not Options.embed)
Options.cimport_from_pyx = saved_cimport_from_pyx
tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
tree.compilation_source = compsrc
tree.scope = scope
tree.is_pxd = False
return tree
return parse
def parse_pxd_stage_factory(context, scope, module_name):
def parse(source_desc):
tree = context.parse(source_desc, scope, pxd=True,
full_module_name=module_name)
tree.scope = scope
tree.is_pxd = True
return tree
return parse
def generate_pyx_code_stage_factory(options, result):
def generate_pyx_code_stage(module_node):
module_node.process_implementation(options, result)
result.compilation_source = module_node.compilation_source
return result
return generate_pyx_code_stage
def inject_pxd_code_stage_factory(context):
def inject_pxd_code_stage(module_node):
from textwrap import dedent
stats = module_node.body.stats
for name, (statlistnode, scope) in context.pxds.iteritems():
module_node.merge_in(statlistnode, scope)
return module_node
return inject_pxd_code_stage
def use_utility_code_definitions(scope, target, seen=None):
if seen is None:
seen = set()
for entry in scope.entries.itervalues():
if entry in seen:
continue
seen.add(entry)
if entry.used and entry.utility_code_definition:
target.use_utility_code(entry.utility_code_definition)
for required_utility in entry.utility_code_definition.requires:
target.use_utility_code(required_utility)
elif entry.as_module:
use_utility_code_definitions(entry.as_module, target, seen)
def inject_utility_code_stage_factory(context):
def inject_utility_code_stage(module_node):
use_utility_code_definitions(context.cython_scope, module_node.scope)
added = []
# Note: the list might be extended inside the loop (if some utility code
# pulls in other utility code, explicitly or implicitly)
for utilcode in module_node.scope.utility_code_list:
if utilcode in added: continue
added.append(utilcode)
if utilcode.requires:
for dep in utilcode.requires:
if not dep in added and not dep in module_node.scope.utility_code_list:
module_node.scope.utility_code_list.append(dep)
tree = utilcode.get_tree()
if tree:
module_node.merge_in(tree.body, tree.scope, merge_scope=True)
return module_node
return inject_utility_code_stage
class UseUtilityCodeDefinitions(CythonTransform):
# Temporary hack to use any utility code in nodes' "utility_code_definitions".
# This should be moved to the code generation phase of the relevant nodes once
# it is safe to generate CythonUtilityCode at code generation time.
def __call__(self, node):
self.scope = node.scope
return super(UseUtilityCodeDefinitions, self).__call__(node)
def process_entry(self, entry):
if entry:
for utility_code in (entry.utility_code, entry.utility_code_definition):
if utility_code:
self.scope.use_utility_code(utility_code)
def visit_AttributeNode(self, node):
self.process_entry(node.entry)
return node
def visit_NameNode(self, node):
self.process_entry(node.entry)
self.process_entry(node.type_entry)
return node
#
# Pipeline factories
#
def create_pipeline(context, mode, exclude_classes=()):
assert mode in ('pyx', 'py', 'pxd')
from Visitor import PrintTree
from ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
from ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
from ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
from ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
from ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
from ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
from ParseTreeTransforms import RemoveUnreachableCode, GilCheck
from FlowControl import ControlFlowAnalysis
from AnalysedTreeTransforms import AutoTestDictTransform
from AutoDocTransforms import EmbedSignature
from Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
from Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
from Optimize import InlineDefNodeCalls
from Optimize import ConstantFolding, FinalOptimizePhase
from Optimize import DropRefcountingTransform
from Buffer import IntroduceBufferAuxiliaryVars
from ModuleNode import check_c_declarations, check_c_declarations_pxd
if mode == 'pxd':
_check_c_declarations = check_c_declarations_pxd
_specific_post_parse = PxdPostParse(context)
else:
_check_c_declarations = check_c_declarations
_specific_post_parse = None
if mode == 'py':
_align_function_definitions = AlignFunctionDefinitions(context)
else:
_align_function_definitions = None
# NOTE: This is the "common" parts of the pipeline, which is also
# code in pxd files. So it will be run multiple times in a
# compilation stage.
stages = [
NormalizeTree(context),
PostParse(context),
_specific_post_parse,
InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context),
AdjustDefByDirectives(context),
MarkClosureVisitor(context),
_align_function_definitions,
RemoveUnreachableCode(context),
ConstantFolding(),
FlattenInListTransform(),
WithTransform(context),
DecoratorTransform(context),
ForwardDeclareTypes(context),
AnalyseDeclarationsTransform(context),
AutoTestDictTransform(context),
EmbedSignature(context),
EarlyReplaceBuiltinCalls(context), ## Necessary?
TransformBuiltinMethods(context), ## Necessary?
MarkParallelAssignments(context),
ControlFlowAnalysis(context),
RemoveUnreachableCode(context),
# MarkParallelAssignments(context),
MarkOverflowingArithmetic(context),
IntroduceBufferAuxiliaryVars(context),
_check_c_declarations,
InlineDefNodeCalls(context),
AnalyseExpressionsTransform(context),
FindInvalidUseOfFusedTypes(context),
CreateClosureClasses(context), ## After all lookups and type inference
ExpandInplaceOperators(context),
OptimizeBuiltinCalls(context), ## Necessary?
IterationTransform(),
SwitchTransform(),
DropRefcountingTransform(),
FinalOptimizePhase(context),
GilCheck(),
UseUtilityCodeDefinitions(context),
]
filtered_stages = []
for s in stages:
if s.__class__ not in exclude_classes:
filtered_stages.append(s)
return filtered_stages
def create_pyx_pipeline(context, options, result, py=False, exclude_classes=()):
if py:
mode = 'py'
else:
mode = 'pyx'
test_support = []
if options.evaluate_tree_assertions:
from Cython.TestUtils import TreeAssertVisitor
test_support.append(TreeAssertVisitor())
if options.gdb_debug:
from Cython.Debugger import DebugWriter # requires Py2.5+
from ParseTreeTransforms import DebugTransform
context.gdb_debug_outputwriter = DebugWriter.CythonDebugWriter(
options.output_dir)
debug_transform = [DebugTransform(context, options, result)]
else:
debug_transform = []
return list(itertools.chain(
[parse_stage_factory(context)],
create_pipeline(context, mode, exclude_classes=exclude_classes),
test_support,
[inject_pxd_code_stage_factory(context),
inject_utility_code_stage_factory(context),
abort_on_errors],
debug_transform,
[generate_pyx_code_stage_factory(options, result)]))
def create_pxd_pipeline(context, scope, module_name):
from CodeGeneration import ExtractPxdCode
# The pxd pipeline ends up with a CCodeWriter containing the
# code of the pxd, as well as a pxd scope.
return [
parse_pxd_stage_factory(context, scope, module_name)
] + create_pipeline(context, 'pxd') + [
ExtractPxdCode()
]
def create_py_pipeline(context, options, result):
return create_pyx_pipeline(context, options, result, py=True)
def create_pyx_as_pxd_pipeline(context, result):
from ParseTreeTransforms import AlignFunctionDefinitions, \
MarkClosureVisitor, WithTransform, AnalyseDeclarationsTransform
from Optimize import ConstantFolding, FlattenInListTransform
from Nodes import StatListNode
pipeline = []
pyx_pipeline = create_pyx_pipeline(context, context.options, result,
exclude_classes=[
AlignFunctionDefinitions,
MarkClosureVisitor,
ConstantFolding,
FlattenInListTransform,
WithTransform
])
for stage in pyx_pipeline:
pipeline.append(stage)
if isinstance(stage, AnalyseDeclarationsTransform):
# This is the last stage we need.
break
def fake_pxd(root):
for entry in root.scope.entries.values():
entry.defined_in_pxd = 1
return StatListNode(root.pos, stats=[]), root.scope
pipeline.append(fake_pxd)
return pipeline
def insert_into_pipeline(pipeline, transform, before=None, after=None):
"""
Insert a new transform into the pipeline after or before an instance of
the given class. e.g.
pipeline = insert_into_pipeline(pipeline, transform,
after=AnalyseDeclarationsTransform)
"""
assert before or after
cls = before or after
for i, t in enumerate(pipeline):
if isinstance(t, cls):
break
if after:
i += 1
return pipeline[:i] + [transform] + pipeline[i:]
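# Hedged usage sketch (MyTransform is a hypothetical transform instance, not from this
# module): matching is done with isinstance() on the instances already in the pipeline,
# so `before`/`after` take a transform class while `transform` is an instance, e.g.
#   pipeline = insert_into_pipeline(pipeline, MyTransform(context),
#                                   before=AnalyseExpressionsTransform)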
#
# Running a pipeline
#
def run_pipeline(pipeline, source, printtree=True):
from Cython.Compiler.Visitor import PrintTree
error = None
data = source
try:
try:
for phase in pipeline:
if phase is not None:
if DebugFlags.debug_verbose_pipeline:
t = time()
print "Entering pipeline phase %r" % phase
if not printtree and isinstance(phase, PrintTree):
continue
data = phase(data)
if DebugFlags.debug_verbose_pipeline:
print " %.3f seconds" % (time() - t)
except CompileError, err:
# err is set
Errors.report_error(err)
error = err
except InternalError, err:
# Only raise if there was not an earlier error
if Errors.num_errors == 0:
raise
error = err
except AbortError, err:
error = err
return (error, data)
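# Hedged usage sketch (assumes a `context`, `options`, `result` and a compilation
# `source` built elsewhere in the compiler; not verified against Main.py):
#   err, enddata = run_pipeline(create_pyx_pipeline(context, options, result), source)
#   if err is not None:
#       # a CompileError has already been reported via Errors.report_error(); other
#       # errors are simply returned here
#       pass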
# ---------------------------------------------------------------------------
import pyqtgraph as pg
from neutronbraggedge.braggedge import BraggEdge
from ..step1.plot import Step1Plot
from ..utilities.retrieve_data_infos import RetrieveGeneralFileInfos, RetrieveGeneralDataInfos
from ..step1.math_utilities import calculate_delta_lambda
from ..utilities.gui_handler import GuiHandler
from .. import DataType
from .roi import Roi
class CustomAxis(pg.AxisItem):
def tickStrings(self, values, scale, spacing):
if 0 in values:
return []
return ['{:.4f}'.format(1. / i) for i in values]
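# Hedged usage sketch (assumed wiring, not taken from this project): CustomAxis labels
# each tick as 1/value and drops the tick at 0 to avoid a division by zero, so it can
# be handed to pyqtgraph as a replacement bottom axis, e.g.
#   plot = pg.PlotWidget(axisItems={'bottom': CustomAxis(orientation='bottom')})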
class Step1GuiHandler(object):
def __init__(self, parent=None, data_type='sample'):
self.parent = parent
self.data_type = data_type
def initialize_rois_and_labels(self):
"""Reached when ROIs have not been manually initialized but loaded via a session"""
list_roi = self.parent.list_roi[self.data_type]
        roi_function = None
        image_view = None
        if self.data_type == DataType.sample:
            roi_function = self.parent.roi_image_view_changed
            image_view = self.parent.ui.image_view
        elif self.data_type == DataType.ob:
            roi_function = self.parent.roi_ob_image_view_changed
            image_view = self.parent.ui.ob_image_view
        elif self.data_type == DataType.normalized:
            roi_function = self.parent.roi_normalized_image_view_changed
            image_view = self.parent.ui.normalized_image_view
already_existing_list_roi = self.parent.list_roi_id[self.data_type]
already_existing_list_label_roi = self.parent.list_label_roi_id[self.data_type]
for _roi_id, _label_id in zip(already_existing_list_roi, already_existing_list_label_roi):
image_view.removeItem(_roi_id)
image_view.removeItem(_label_id)
list_roi_id = Roi.setup_roi_id(list_roi=list_roi, roi_function=roi_function)
self.parent.list_roi_id[self.data_type] = list_roi_id
list_label_roi_id = []
for _roi, _roi_id in zip(list_roi, list_roi_id):
label = _roi[0]
x0 = int(_roi[1])
y0 = int(_roi[2])
# label roi
label_roi = pg.TextItem(
html='<div style="text-align: center"><span style="color: #ff0000;">' + label + '</span></div>',
anchor=(-0.3, 1.3),
border='w',
fill=(0, 0, 255, 50))
# # roi region in image
# roi = pg.ROI([x0, y0], [width, height])
# roi.addScaleHandle([1, 1], [0, 0])
image_view.addItem(_roi_id)
image_view.addItem(label_roi)
label_roi.setPos(x0, y0)
            _roi_id.sigRegionChanged.connect(roi_function)
list_label_roi_id.append(label_roi)
self.parent.list_label_roi_id[self.data_type] = list_label_roi_id
def sync_instrument_widgets(self, source='load_data'):
target = 'normalized'
if source == 'normalized':
target = 'load_data'
list_ui = {'load_data': {'distance': self.parent.ui.distance_source_detector,
'beam': self.parent.ui.beam_rate,
'detector': self.parent.ui.detector_offset},
'normalized': {'distance': self.parent.ui.distance_source_detector_2,
'beam': self.parent.ui.beam_rate_2,
'detector': self.parent.ui.detector_offset_2}}
o_gui = GuiHandler(parent=self.parent)
distance_value = o_gui.get_text(ui=list_ui[source]['distance'])
detector_value = o_gui.get_text(ui=list_ui[source]['detector'])
beam_index = o_gui.get_index_selected(ui=list_ui[source]['beam'])
o_gui.set_text(value=distance_value, ui=list_ui[target]['distance'])
o_gui.set_text(value=detector_value, ui=list_ui[target]['detector'])
o_gui.set_index_selected(index=beam_index, ui=list_ui[target]['beam'])
def load_data_tab_changed(self, tab_index=0):
data_type = 'sample'
if tab_index == 0:
# data_preview_box_label = "Sample Image Preview"
o_general_infos = RetrieveGeneralFileInfos(parent=self.parent,
data_type='sample')
o_selected_infos = RetrieveGeneralDataInfos(parent=self.parent,
data_type='sample')
else:
# data_preview_box_label = "Open Beam Image Preview"
o_general_infos = RetrieveGeneralFileInfos(parent=self.parent,
data_type='ob')
o_selected_infos = RetrieveGeneralDataInfos(parent=self.parent,
data_type='ob')
data_type = 'ob'
o_general_infos.update()
o_selected_infos.update()
row_selected = self.row_selected(data_type=data_type)
data = self.parent.data_metadata[data_type]['data']
        if data != []:
data = data[row_selected]
o_gui = Step1Plot(parent=self.parent,
data_type=data_type,
data=data)
o_gui.all_plots()
def row_selected(self, data_type='sample'):
return self.parent.data_metadata[data_type]['list_widget_ui'].currentRow()
def get_element_selected(self, source='load_data'):
if source == 'load_data':
return str(self.parent.ui.list_of_elements.currentText())
else:
return str(self.parent.ui.list_of_elements_2.currentText())
def set_crystal_structure(self, new_crystal_structure):
nbr_item = self.parent.ui.crystal_structure.count()
for _row in range(nbr_item):
_item_of_row = self.parent.ui.crystal_structure.itemText(_row)
if _item_of_row == new_crystal_structure:
self.parent.ui.crystal_structure.setCurrentIndex(_row)
self.parent.ui.crystal_structure_2.setCurrentIndex(_row)
def retrieve_handler_from_local_bragg_edge_list(self, material=None):
'''
        Check whether the material is in the local list of Bragg edges and,
        if it is, return the dictionary for that material.
'''
if material is None:
return None
_local_bragg_edge_list = self.parent.local_bragg_edge_list
if material in _local_bragg_edge_list.keys():
return _local_bragg_edge_list[material]
def add_element_to_local_bragg_edge_list(self, material=None):
'''
Add a new material to the local Bragg edge list.
The new entry has the form
'material': {'crystal_structure': '', 'lattice': -1}
'''
if material is None:
return None
o_gui = GuiHandler(parent=self.parent)
_crystal_structure = o_gui.get_text_selected(ui=self.parent.ui.crystal_structure)
_lattice = o_gui.get_text(ui=self.parent.ui.lattice_parameter)
self.parent.local_bragg_edge_list[material] = {'crystal_structure': _crystal_structure,
'lattice': _lattice}
def update_lattice_and_crystal_when_index_selected(self, source='load_data',
fill_lattice_flag=True,
fill_crystal_structure_flag=True):
_element = self.get_element_selected(source=source)
try:
_handler = BraggEdge(material=_element)
_crystal_structure = _handler.metadata['crystal_structure'][_element]
_lattice = str(_handler.metadata['lattice'][_element])
except KeyError:
# new element
if source == 'load_data':
_lattice = str(self.parent.ui.lattice_parameter.text())
_index = self.parent.ui.list_of_elements.currentIndex()
self.parent.ui.list_of_elements_2.addItem(_element)
self.parent.ui.list_of_elements_2.setCurrentIndex(_index)
self.parent.ui.lattice_parameter_2.setText(_lattice)
else:
_lattice = str(self.parent.ui.lattice_parameter_2.text())
_index = self.parent.ui.list_of_elements_2.currentIndex()
self.parent.ui.list_of_elements.addItem(_element)
self.parent.ui.list_of_elements.setCurrentIndex(_index)
self.parent.ui.lattice_parameter.setText(_lattice)
return
# except KeyError:
#
# # look for element in local list of element
# _handler = self.retrieve_handler_from_local_bragg_edge_list(material=_element)
# _crystal_structure = _handler['crystal_structure']
# _lattice = _handler['lattice']
if source == 'load_data':
_index = self.parent.ui.list_of_elements.currentIndex()
self.parent.ui.list_of_elements_2.setCurrentIndex(_index)
else:
_index = self.parent.ui.list_of_elements_2.currentIndex()
self.parent.ui.list_of_elements.setCurrentIndex(_index)
if fill_lattice_flag:
self.parent.ui.lattice_parameter.setText(_lattice)
self.parent.ui.lattice_parameter_2.setText(_lattice)
if fill_crystal_structure_flag:
self.set_crystal_structure(_crystal_structure)
def select_load_data_row(self, data_type='sample', row=0):
if data_type == 'sample':
self.parent.ui.list_sample.setCurrentRow(row)
elif data_type == 'ob':
self.parent.ui.list_open_beam.setCurrentRow(row)
elif data_type == 'normalized':
self.parent.ui.list_normalized.setCurrentRow(row)
def update_delta_lambda(self):
distance_source_detector = float(str(self.parent.ui.distance_source_detector.text()))
frequency = float(str(self.parent.ui.beam_rate.currentText()))
delta_lambda = calculate_delta_lambda(
distance_source_detector=distance_source_detector,
frequency=frequency)
self.parent.ui.delta_lambda_value.setText("{:.2f}".format(delta_lambda))
self.parent.ui.delta_lambda_value_2.setText("{:.2f}".format(delta_lambda))
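# Note added for clarity (not part of the original implementation): for a
# pulsed time-of-flight source the wavelength bin width is commonly taken as
# delta_lambda = h / (m_n * L * f), with h the Planck constant, m_n the
# neutron mass, L the source-to-detector distance and f the beam rate.
# calculate_delta_lambda() is assumed to implement a relation of this form;
# the exact constants and unit conversions live in that helper.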
def check_step1_widgets(self):
if self.parent.data_metadata[self.data_type]['data'].any():
self.parent.ui.toolBox.setItemEnabled(1, True)
self.parent.ui.tabWidget.setTabEnabled(1, True)
def check_time_spectra_widgets(self):
time_spectra_data = self.parent.data_metadata['time_spectra']['data']
if self.parent.ui.material_display_checkbox.isChecked():
if time_spectra_data == []:
_display_error_label = True
else:
_display_error_label = False
else:
_display_error_label = False
self.parent.ui.display_warning.setVisible(_display_error_label)
def block_instrument_widgets(self, status=True):
self.parent.ui.detector_offset.blockSignals(status)
self.parent.ui.detector_offset_2.blockSignals(status)
self.parent.ui.distance_source_detector.blockSignals(status)
self.parent.ui.distance_source_detector_2.blockSignals(status)
self.parent.ui.beam_rate.blockSignals(status)
self.parent.ui.beam_rate_2.blockSignals(status)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.gkehub_v1beta1.services.gke_hub_membership_service import pagers
from google.cloud.gkehub_v1beta1.types import membership
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import GkeHubMembershipServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import GkeHubMembershipServiceGrpcAsyncIOTransport
from .client import GkeHubMembershipServiceClient
class GkeHubMembershipServiceAsyncClient:
"""The GKE Hub MembershipService handles the registration of many
Kubernetes clusters to Google Cloud, represented with the
[Membership][google.cloud.gkehub.v1beta1.Membership] resource.
GKE Hub is currently only available in the global region.
**Membership management may be non-trivial:** it is recommended to
use one of the Google-provided client libraries or tools where
possible when working with Membership resources.
"""
_client: GkeHubMembershipServiceClient
DEFAULT_ENDPOINT = GkeHubMembershipServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = GkeHubMembershipServiceClient.DEFAULT_MTLS_ENDPOINT
membership_path = staticmethod(GkeHubMembershipServiceClient.membership_path)
parse_membership_path = staticmethod(
GkeHubMembershipServiceClient.parse_membership_path
)
common_billing_account_path = staticmethod(
GkeHubMembershipServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
GkeHubMembershipServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(GkeHubMembershipServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
GkeHubMembershipServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
GkeHubMembershipServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
GkeHubMembershipServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(
GkeHubMembershipServiceClient.common_project_path
)
parse_common_project_path = staticmethod(
GkeHubMembershipServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
GkeHubMembershipServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
GkeHubMembershipServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GkeHubMembershipServiceAsyncClient: The constructed client.
"""
return GkeHubMembershipServiceClient.from_service_account_info.__func__(GkeHubMembershipServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GkeHubMembershipServiceAsyncClient: The constructed client.
"""
return GkeHubMembershipServiceClient.from_service_account_file.__func__(GkeHubMembershipServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return GkeHubMembershipServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> GkeHubMembershipServiceTransport:
"""Returns the transport used by the client instance.
Returns:
GkeHubMembershipServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(GkeHubMembershipServiceClient).get_transport_class,
type(GkeHubMembershipServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, GkeHubMembershipServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the gke hub membership service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.GkeHubMembershipServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = GkeHubMembershipServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
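# Illustrative note (not part of the generated client): a custom endpoint can
# be selected through ``client_options``, e.g.
#
#     options = ClientOptions(api_endpoint="gkehub.googleapis.com")
#     client = GkeHubMembershipServiceAsyncClient(client_options=options)
#
# The endpoint above is only a placeholder example; mTLS behaviour is still
# governed by the environment variables described in the docstring above.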
async def list_memberships(
self,
request: Union[membership.ListMembershipsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMembershipsAsyncPager:
r"""Lists Memberships in a given project and location.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_list_memberships():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.ListMembershipsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_memberships(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.ListMembershipsRequest, dict]):
The request object. Request message for
`GkeHubMembershipService.ListMemberships` method.
parent (:class:`str`):
Required. The parent (project and location) where the
Memberships will be listed. Specified in the format
``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gkehub_v1beta1.services.gke_hub_membership_service.pagers.ListMembershipsAsyncPager:
Response message for the
GkeHubMembershipService.ListMemberships method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = membership.ListMembershipsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_memberships,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMembershipsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_membership(
self,
request: Union[membership.GetMembershipRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> membership.Membership:
r"""Gets the details of a Membership.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_get_membership():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.GetMembershipRequest(
name="name_value",
)
# Make the request
response = client.get_membership(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.GetMembershipRequest, dict]):
The request object. Request message for
`GkeHubMembershipService.GetMembership` method.
name (:class:`str`):
Required. The Membership resource name in the format
``projects/*/locations/*/memberships/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gkehub_v1beta1.types.Membership:
Membership contains information about
a member cluster.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = membership.GetMembershipRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_membership,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_membership(
self,
request: Union[membership.CreateMembershipRequest, dict] = None,
*,
parent: str = None,
resource: membership.Membership = None,
membership_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Membership.
**This is currently only supported for GKE clusters on Google
Cloud**. To register other clusters, follow the instructions at
https://cloud.google.com/anthos/multicluster-management/connect/registering-a-cluster.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_create_membership():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.CreateMembershipRequest(
parent="parent_value",
membership_id="membership_id_value",
)
# Make the request
operation = client.create_membership(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.CreateMembershipRequest, dict]):
The request object. Request message for the
`GkeHubMembershipService.CreateMembership` method.
parent (:class:`str`):
Required. The parent (project and location) where the
Memberships will be created. Specified in the format
``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (:class:`google.cloud.gkehub_v1beta1.types.Membership`):
Required. The membership to create.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
membership_id (:class:`str`):
Required. Client chosen ID for the membership.
``membership_id`` must be a valid RFC 1123 compliant DNS
label:
1. At most 63 characters in length
2. It must consist of lower case alphanumeric characters
or ``-``
3. It must start and end with an alphanumeric character
Which can be expressed as the regex:
``[a-z0-9]([-a-z0-9]*[a-z0-9])?``, with a maximum length
of 63 characters.
This corresponds to the ``membership_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gkehub_v1beta1.types.Membership`
Membership contains information about a member cluster.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, resource, membership_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = membership.CreateMembershipRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if resource is not None:
request.resource = resource
if membership_id is not None:
request.membership_id = membership_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_membership,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
membership.Membership,
metadata_type=membership.OperationMetadata,
)
# Done; return the response.
return response
async def delete_membership(
self,
request: Union[membership.DeleteMembershipRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Removes a Membership.
**This is currently only supported for GKE clusters on Google
Cloud**. To unregister other clusters, follow the instructions
at
https://cloud.google.com/anthos/multicluster-management/connect/unregistering-a-cluster.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_delete_membership():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.DeleteMembershipRequest(
name="name_value",
)
# Make the request
operation = client.delete_membership(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.DeleteMembershipRequest, dict]):
The request object. Request message for
`GkeHubMembershipService.DeleteMembership` method.
name (:class:`str`):
Required. The Membership resource name in the format
``projects/*/locations/*/memberships/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = membership.DeleteMembershipRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_membership,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=membership.OperationMetadata,
)
# Done; return the response.
return response
async def update_membership(
self,
request: Union[membership.UpdateMembershipRequest, dict] = None,
*,
name: str = None,
resource: membership.Membership = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates an existing Membership.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_update_membership():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.UpdateMembershipRequest(
name="name_value",
)
# Make the request
operation = client.update_membership(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.UpdateMembershipRequest, dict]):
The request object. Request message for
`GkeHubMembershipService.UpdateMembership` method.
name (:class:`str`):
Required. The membership resource name in the format:
``projects/[project_id]/locations/global/memberships/[membership_id]``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (:class:`google.cloud.gkehub_v1beta1.types.Membership`):
Required. Only fields specified in update_mask are
updated. If you specify a field in the update_mask but
don't specify its value here that field will be deleted.
If you are updating a map field, set the value of a key
to null or empty string to delete the key from the map.
It's not possible to update a key's value to the empty
string. If you specify the update_mask to be a special
path "*", fully replaces all user-modifiable fields to
match ``resource``.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. Mask of fields to update.
At least one field path must be
specified in this mask.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gkehub_v1beta1.types.Membership`
Membership contains information about a member cluster.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, resource, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = membership.UpdateMembershipRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if resource is not None:
request.resource = resource
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_membership,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
membership.Membership,
metadata_type=membership.OperationMetadata,
)
# Done; return the response.
return response
async def generate_connect_manifest(
self,
request: Union[membership.GenerateConnectManifestRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> membership.GenerateConnectManifestResponse:
r"""Generates the manifest for deployment of the GKE connect agent.
**This method is used internally by Google-provided libraries.**
Most clients should not need to call this method directly.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_generate_connect_manifest():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.GenerateConnectManifestRequest(
name="name_value",
)
# Make the request
response = client.generate_connect_manifest(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.GenerateConnectManifestRequest, dict]):
The request object. Request message for
`GkeHubMembershipService.GenerateConnectManifest`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gkehub_v1beta1.types.GenerateConnectManifestResponse:
GenerateConnectManifestResponse
contains manifest information for
installing/upgrading a Connect agent.
"""
# Create or coerce a protobuf request object.
request = membership.GenerateConnectManifestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.generate_connect_manifest,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def validate_exclusivity(
self,
request: Union[membership.ValidateExclusivityRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> membership.ValidateExclusivityResponse:
r"""ValidateExclusivity validates the state of
exclusivity in the cluster. The validation does not
depend on an existing Hub membership resource.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_validate_exclusivity():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.ValidateExclusivityRequest(
parent="parent_value",
intended_membership="intended_membership_value",
)
# Make the request
response = client.validate_exclusivity(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.ValidateExclusivityRequest, dict]):
The request object. The request to validate the existing
state of the membership CR in the cluster.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gkehub_v1beta1.types.ValidateExclusivityResponse:
The response of exclusivity artifacts
validation result status.
"""
# Create or coerce a protobuf request object.
request = membership.ValidateExclusivityRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.validate_exclusivity,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def generate_exclusivity_manifest(
self,
request: Union[membership.GenerateExclusivityManifestRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> membership.GenerateExclusivityManifestResponse:
r"""GenerateExclusivityManifest generates the manifests
to update the exclusivity artifacts in the cluster if
needed.
Exclusivity artifacts include the Membership custom
resource definition (CRD) and the singleton Membership
custom resource (CR). Combined with ValidateExclusivity,
exclusivity artifacts guarantee that a Kubernetes
cluster is only registered to a single GKE Hub.
The Membership CRD is versioned, and may require
conversion when the GKE Hub API server begins serving a
newer version of the CRD and corresponding CR. The
response will be the converted CRD and CR if there are
any differences between the versions.
.. code-block:: python
from google.cloud import gkehub_v1beta1
def sample_generate_exclusivity_manifest():
# Create a client
client = gkehub_v1beta1.GkeHubMembershipServiceClient()
# Initialize request argument(s)
request = gkehub_v1beta1.GenerateExclusivityManifestRequest(
name="name_value",
)
# Make the request
response = client.generate_exclusivity_manifest(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.gkehub_v1beta1.types.GenerateExclusivityManifestRequest, dict]):
The request object. The request to generate the
manifests for exclusivity artifacts.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gkehub_v1beta1.types.GenerateExclusivityManifestResponse:
The response of the exclusivity
artifacts manifests for the client to
apply.
"""
# Create or coerce a protobuf request object.
request = membership.GenerateExclusivityManifestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.generate_exclusivity_manifest,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-gke-hub",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("GkeHubMembershipServiceAsyncClient",)
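# ---------------------------------------------------------------------------
# Illustrative usage (not part of the generated module): a minimal sketch of
# driving the async client, assuming Application Default Credentials are
# available and that the placeholder parent below is replaced with a real
# ``projects/*/locations/*`` path.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - example only
    import asyncio
    from google.cloud import gkehub_v1beta1

    async def _example_list_memberships():
        client = gkehub_v1beta1.GkeHubMembershipServiceAsyncClient()
        request = gkehub_v1beta1.ListMembershipsRequest(
            parent="projects/my-project/locations/global",  # placeholder
        )
        # Awaiting the call yields an async pager; ``async for`` then resolves
        # additional pages transparently.
        async for item in await client.list_memberships(request=request):
            print(item.name)

    asyncio.run(_example_list_memberships())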
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import numpy as np
import pytest
import pyarrow as pa
from pyarrow import fs
from pyarrow.filesystem import LocalFileSystem
from pyarrow.tests import util
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_fixed,
parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
from pyarrow.vendored.version import Version
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (
_read_table, _test_dataframe, _write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
df = _test_dataframe(1000)
table = pa.Table.from_pandas(df)
path = tempdir / 'parquet_piece_read.parquet'
_write_table(table, path, version='2.6')
with pytest.warns(DeprecationWarning):
piece1 = pq.ParquetDatasetPiece(path)
result = piece1.read()
assert result.equals(table)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df)
path = tempdir / 'parquet_piece_read.parquet'
_write_table(table, path, version='2.6')
with pytest.warns(DeprecationWarning):
piece = pq.ParquetDatasetPiece(path)
table1 = piece.read()
assert isinstance(table1, pa.Table)
meta1 = piece.get_metadata()
assert isinstance(meta1, pq.FileMetaData)
assert table.equals(table1)
@pytest.mark.filterwarnings("ignore:ParquetDatasetPiece:DeprecationWarning")
def test_parquet_piece_basics():
path = '/baz.parq'
piece1 = pq.ParquetDatasetPiece(path)
piece2 = pq.ParquetDatasetPiece(path, row_group=1)
piece3 = pq.ParquetDatasetPiece(
path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
assert str(piece1) == path
assert str(piece2) == '/baz.parq | row_group=1'
assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
assert piece1 == piece1
assert piece2 == piece2
assert piece3 == piece3
assert piece1 != piece3
def test_partition_set_dictionary_type():
set1 = pq.PartitionSet('key1', ['foo', 'bar', 'baz'])
set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
assert isinstance(set1.dictionary, pa.StringArray)
assert isinstance(set2.dictionary, pa.IntegerArray)
set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
with pytest.raises(TypeError):
set3.dictionary
@parametrize_legacy_dataset_fixed
def test_filesystem_uri(tempdir, use_legacy_dataset):
table = pa.table({"a": [1, 2, 3]})
directory = tempdir / "data_dir"
directory.mkdir()
path = directory / "data.parquet"
pq.write_table(table, str(path))
# filesystem object
result = pq.read_table(
path, filesystem=fs.LocalFileSystem(),
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
# filesystem URI
result = pq.read_table(
"data_dir/data.parquet", filesystem=util._filesystem_uri(tempdir),
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_partitioned_directory(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
_partition_test_for_filesystem(fs, tempdir, use_legacy_dataset)
@pytest.mark.filterwarnings("ignore:'ParquetDataset:DeprecationWarning")
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
fs = LocalFileSystem._get_instance()
base_path = tempdir
_partition_test_for_filesystem(fs, base_path)
manifest = pq.ParquetManifest(base_path, filesystem=fs,
metadata_nthreads=1)
dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
assert len(dataset.pieces) > 0
partitions = dataset.partitions
assert len(partitions.partition_names) > 0
assert partitions.partition_names == manifest.partitions.partition_names
assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_partitioned_columns_selection(tempdir, use_legacy_dataset):
# ARROW-3861 - do not include partition columns in resulting table when
# `columns` keyword was passed without those columns
fs = LocalFileSystem._get_instance()
base_path = tempdir
_partition_test_for_filesystem(fs, base_path)
dataset = pq.ParquetDataset(
base_path, use_legacy_dataset=use_legacy_dataset)
result = dataset.read(columns=["values"])
if use_legacy_dataset:
# ParquetDataset implementation always includes the partition columns
# automatically, and we can't easily "fix" this since dask relies on
# this behaviour (ARROW-8644)
assert result.column_names == ["values", "foo", "bar"]
else:
assert result.column_names == ["values"]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_equivalency(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1]
string_keys = ['a', 'b', 'c']
boolean_keys = [True, False]
partition_spec = [
['integer', integer_keys],
['string', string_keys],
['boolean', boolean_keys]
]
df = pd.DataFrame({
'integer': np.array(integer_keys, dtype='i4').repeat(15),
'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
3),
}, columns=['integer', 'string', 'boolean'])
_generate_partition_directories(fs, base_path, partition_spec, df)
# Old filters syntax:
# integer == 1 AND string != b AND boolean == True
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('integer', '=', 1), ('string', '!=', 'b'),
('boolean', '==', 'True')],
use_legacy_dataset=use_legacy_dataset,
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 0 not in result_df['integer'].values
assert 'b' not in result_df['string'].values
assert False not in result_df['boolean'].values
# filters in disjunctive normal form:
# (integer == 1 AND string != b AND boolean == True) OR
# (integer == 2 AND boolean == False)
# TODO(ARROW-3388): boolean columns are reconstructed as string
filters = [
[
('integer', '=', 1),
('string', '!=', 'b'),
('boolean', '==', 'True')
],
[('integer', '=', 0), ('boolean', '==', 'False')]
]
dataset = pq.ParquetDataset(
base_path, filesystem=fs, filters=filters,
use_legacy_dataset=use_legacy_dataset)
table = dataset.read()
result_df = table.to_pandas().reset_index(drop=True)
# Check that all rows in the DF fulfill the filter
# Pandas 0.23.x has problems with indexing constant memoryviews in
# categoricals. Thus we need to make an explicit copy here with np.array.
df_filter_1 = (np.array(result_df['integer']) == 1) \
& (np.array(result_df['string']) != 'b') \
& (np.array(result_df['boolean']) == 'True')
df_filter_2 = (np.array(result_df['integer']) == 0) \
& (np.array(result_df['boolean']) == 'False')
assert df_filter_1.sum() > 0
assert df_filter_2.sum() > 0
assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
if use_legacy_dataset:
# Check for \0 in predicate values. Until they are correctly
# implemented in ARROW-3391, they would otherwise lead to weird
# results with the current code.
with pytest.raises(NotImplementedError):
filters = [[('string', '==', b'1\0a')]]
pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
with pytest.raises(NotImplementedError):
filters = [[('string', '==', '1\0a')]]
pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
else:
for filters in [[[('string', '==', b'1\0a')]],
[[('string', '==', '1\0a')]]]:
dataset = pq.ParquetDataset(
base_path, filesystem=fs, filters=filters,
use_legacy_dataset=False)
assert dataset.read().num_rows == 0
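# Note added for clarity: a flat list of filter tuples such as
# [('integers', '<', 3)] is shorthand for a single DNF conjunction, i.e. it is
# equivalent to [[('integers', '<', 3)]]; test_filters_read_table below
# exercises both spellings against pq.read_table.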
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_cutoff_exclusive_integer(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('integers', '<', 4),
('integers', '>', 1),
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
result_list = [int(x) for x in result_df['integers'].values]
assert result_list == [2, 3]
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.xfail(
# different error with use_legacy_datasets because result_df is no longer
# categorical
raises=(TypeError, AssertionError),
reason='Loss of type information in creation of categoricals.'
)
def test_filters_cutoff_exclusive_datetime(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
date_keys = [
datetime.date(2018, 4, 9),
datetime.date(2018, 4, 10),
datetime.date(2018, 4, 11),
datetime.date(2018, 4, 12),
datetime.date(2018, 4, 13)
]
partition_spec = [
['dates', date_keys]
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'dates': np.array(date_keys, dtype='datetime64'),
}, columns=['index', 'dates'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('dates', '<', "2018-04-12"),
('dates', '>', "2018-04-10")
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected = pd.Categorical(
np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
categories=np.array(date_keys, dtype='datetime64'))
assert result_df['dates'].values == expected
@pytest.mark.pandas
@pytest.mark.dataset
def test_filters_inclusive_datetime(tempdir):
# ARROW-11480
path = tempdir / 'timestamps.parquet'
pd.DataFrame({
"dates": pd.date_range("2020-01-01", periods=10, freq="D"),
"id": range(10)
}).to_parquet(path, use_deprecated_int96_timestamps=True)
table = pq.read_table(path, filters=[
("dates", "<=", datetime.datetime(2020, 1, 5))
])
assert table.column('id').to_pylist() == [0, 1, 2, 3, 4]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_inclusive_integer(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('integers', '<=', 3),
('integers', '>=', 2),
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
result_list = [int(x) for x in result_df['integers'].values]
assert result_list == [2, 3]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_inclusive_set(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1]
string_keys = ['a', 'b', 'c']
boolean_keys = [True, False]
partition_spec = [
['integer', integer_keys],
['string', string_keys],
['boolean', boolean_keys]
]
df = pd.DataFrame({
'integer': np.array(integer_keys, dtype='i4').repeat(15),
'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
3),
}, columns=['integer', 'string', 'boolean'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('string', 'in', 'ab')],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 'a' in result_df['string'].values
assert 'b' in result_df['string'].values
assert 'c' not in result_df['string'].values
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('integer', 'in', [1]), ('string', 'in', ('a', 'b')),
('boolean', 'not in', {False})],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 0 not in result_df['integer'].values
assert 'c' not in result_df['string'].values
assert False not in result_df['boolean'].values
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_invalid_pred_op(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
with pytest.raises(TypeError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', 3), ],
use_legacy_dataset=use_legacy_dataset)
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', '=<', 3), ],
use_legacy_dataset=use_legacy_dataset)
if use_legacy_dataset:
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', set()), ],
use_legacy_dataset=use_legacy_dataset)
else:
# Dataset API returns empty table instead
dataset = pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', set()), ],
use_legacy_dataset=use_legacy_dataset)
assert dataset.read().num_rows == 0
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', '!=', {3})],
use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_filters_invalid_column(tempdir, use_legacy_dataset):
# ARROW-5572 - raise error on invalid name in filter specification
# works with new dataset / xfail with legacy implementation
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [['integers', integer_keys]]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
msg = r"No match for FieldRef.Name\(non_existent_column\)"
with pytest.raises(ValueError, match=msg):
pq.ParquetDataset(base_path, filesystem=fs,
filters=[('non_existent_column', '<', 3), ],
use_legacy_dataset=use_legacy_dataset).read()
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_read_table(tempdir, use_legacy_dataset):
# test that filters keyword is passed through in read_table
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
table = pq.read_table(
base_path, filesystem=fs, filters=[('integers', '<', 3)],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
table = pq.read_table(
base_path, filesystem=fs, filters=[[('integers', '<', 3)]],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
table = pq.read_pandas(
base_path, filters=[('integers', '<', 3)],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_partition_keys_with_underscores(tempdir, use_legacy_dataset):
# ARROW-5666 - partition field values with underscores preserve underscores
# xfail with legacy dataset -> they get interpreted as integers
fs = LocalFileSystem._get_instance()
base_path = tempdir
string_keys = ["2019_2", "2019_3"]
partition_spec = [
['year_week', string_keys],
]
N = 2
df = pd.DataFrame({
'index': np.arange(N),
'year_week': np.array(string_keys, dtype='object'),
}, columns=['index', 'year_week'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, use_legacy_dataset=use_legacy_dataset)
result = dataset.read()
assert result.column("year_week").to_pylist() == string_keys
@pytest.mark.s3
@parametrize_legacy_dataset
def test_read_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, path = s3_example_s3fs
path = path + "/test.parquet"
table = pa.table({"a": [1, 2, 3]})
_write_table(table, path, filesystem=fs)
result = _read_table(
path, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
assert result.equals(table)
@pytest.mark.s3
@parametrize_legacy_dataset
def test_read_directory_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, directory = s3_example_s3fs
path = directory + "/test.parquet"
table = pa.table({"a": [1, 2, 3]})
_write_table(table, path, filesystem=fs)
result = _read_table(
directory, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
assert result.equals(table)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_read_partitioned_directory_s3fs_wrapper(
s3_example_s3fs, use_legacy_dataset
):
import s3fs
from pyarrow.filesystem import S3FSWrapper
if Version(s3fs.__version__) >= Version("0.5"):
pytest.skip("S3FSWrapper no longer working for s3fs 0.5+")
fs, path = s3_example_s3fs
with pytest.warns(FutureWarning):
wrapper = S3FSWrapper(fs)
_partition_test_for_filesystem(wrapper, path)
# Check that we can auto-wrap
dataset = pq.ParquetDataset(
path, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
dataset.read()
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_read_partitioned_directory_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, path = s3_example_s3fs
_partition_test_for_filesystem(
fs, path, use_legacy_dataset=use_legacy_dataset
)
def _partition_test_for_filesystem(fs, base_path, use_legacy_dataset=True):
foo_keys = [0, 1]
bar_keys = ['a', 'b', 'c']
partition_spec = [
['foo', foo_keys],
['bar', bar_keys]
]
N = 30
df = pd.DataFrame({
'index': np.arange(N),
'foo': np.array(foo_keys, dtype='i4').repeat(15),
'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
'values': np.random.randn(N)
}, columns=['index', 'foo', 'bar', 'values'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs, use_legacy_dataset=use_legacy_dataset)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected_df = (df.sort_values(by='index')
.reset_index(drop=True)
.reindex(columns=result_df.columns))
expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
# partition_spec : list of lists, e.g. [['foo', [0, 1, 2]],
#                  ['bar', ['a', 'b', 'c']]]
# df : a pandas DataFrame; for each leaf partition the matching rows are
#      converted to a pyarrow.Table and written into that directory
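# For example, the spec above yields a Hive-style layout roughly like:
#   <base_dir>/foo=0/bar=a/<guid>   (leaf parquet file)
#   <base_dir>/foo=0/_SUCCESS       (empty marker file)
# with one directory level per entry in partition_spec and each leaf file
# holding the rows of df that match that combination of partition values.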
DEPTH = len(partition_spec)
pathsep = getattr(fs, "pathsep", getattr(fs, "sep", "/"))
def _visit_level(base_dir, level, part_keys):
name, values = partition_spec[level]
for value in values:
this_part_keys = part_keys + [(name, value)]
level_dir = pathsep.join([
str(base_dir),
'{}={}'.format(name, value)
])
fs.mkdir(level_dir)
if level == DEPTH - 1:
# Generate example data
file_path = pathsep.join([level_dir, guid()])
filtered_df = _filter_partition(df, this_part_keys)
part_table = pa.Table.from_pandas(filtered_df)
with fs.open(file_path, 'wb') as f:
_write_table(part_table, f)
assert fs.exists(file_path)
file_success = pathsep.join([level_dir, '_SUCCESS'])
with fs.open(file_success, 'wb') as f:
pass
else:
_visit_level(level_dir, level + 1, this_part_keys)
file_success = pathsep.join([level_dir, '_SUCCESS'])
with fs.open(file_success, 'wb') as f:
pass
_visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
import pandas as pd
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
base_path = str(base_path)
data_path = os.path.join(base_path, 'data.parquet')
table = pa.Table.from_pandas(df)
with fs.open(data_path, 'wb') as f:
_write_table(table, f)
metadata_path = os.path.join(base_path, '_common_metadata')
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(base_path, filesystem=fs)
assert dataset.common_metadata_path == str(metadata_path)
with fs.open(data_path) as f:
common_schema = pq.read_metadata(f).schema
assert dataset.schema.equals(common_schema)
# handle list of one directory
dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
fs = LocalFileSystem._get_instance()
_test_read_common_metadata_files(fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
fs = LocalFileSystem._get_instance()
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
data_path = tempdir / 'data.parquet'
table = pa.Table.from_pandas(df)
with fs.open(data_path, 'wb') as f:
_write_table(table, f)
metadata_path = tempdir / '_metadata'
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(tempdir, filesystem=fs)
assert dataset.metadata_path == str(metadata_path)
with fs.open(data_path) as f:
metadata_schema = pq.read_metadata(f).schema
assert dataset.schema.equals(metadata_schema)
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
# to avoid pandas warning
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
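# A minimal usage sketch of the helper above (assuming df has 'foo' and
# 'bar' columns):
#   _filter_partition(df, [('foo', 0), ('bar', 'a')])
# keeps only the rows where foo == 0 and bar == 'a' and drops both columns,
# mirroring what a reader reconstructs from the partition directory names.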
@parametrize_legacy_dataset
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir, use_legacy_dataset):
# ARROW-4076 apply filter before schema validation
# to avoid checking unneeded schemas
# create partitioned dataset with mismatching schemas which would
# otherwise raise if we first validated all schemas
dir1 = tempdir / 'A=0'
dir1.mkdir()
table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
pq.write_table(table1, dir1 / 'data.parquet')
dir2 = tempdir / 'A=1'
dir2.mkdir()
table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
pq.write_table(table2, dir2 / 'data.parquet')
# read single file using filter
table = pq.read_table(tempdir, filters=[[('A', '==', 0)]],
use_legacy_dataset=use_legacy_dataset)
assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_multiple_files(tempdir, use_legacy_dataset):
nfiles = 10
size = 5
dirpath = tempdir / guid()
dirpath.mkdir()
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = dirpath / '{}.parquet'.format(i)
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
paths.append(path)
# Write a _SUCCESS.crc file
(dirpath / '_SUCCESS.crc').touch()
def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
dataset = pq.ParquetDataset(
paths, use_legacy_dataset=use_legacy_dataset, **kwargs)
return dataset.read(columns=columns, use_threads=use_threads)
result = read_multiple_files(paths)
expected = pa.concat_tables(test_data)
assert result.equals(expected)
# Read with provided metadata
# TODO(dataset) specifying metadata not yet supported
metadata = pq.read_metadata(paths[0])
if use_legacy_dataset:
result2 = read_multiple_files(paths, metadata=metadata)
assert result2.equals(expected)
result3 = pq.ParquetDataset(dirpath, schema=metadata.schema).read()
assert result3.equals(expected)
else:
with pytest.raises(ValueError, match="no longer supported"):
pq.read_table(paths, metadata=metadata, use_legacy_dataset=False)
# Read column subset
to_read = [0, 2, 6, result.num_columns - 1]
col_names = [result.field(i).name for i in to_read]
out = pq.read_table(
dirpath, columns=col_names, use_legacy_dataset=use_legacy_dataset
)
expected = pa.Table.from_arrays([result.column(i) for i in to_read],
names=col_names,
metadata=result.schema.metadata)
assert out.equals(expected)
# Read with multiple threads
pq.read_table(
dirpath, use_threads=True, use_legacy_dataset=use_legacy_dataset
)
# Test failure modes with non-uniform metadata
bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
bad_apple_path = tempdir / '{}.parquet'.format(guid())
t = pa.Table.from_pandas(bad_apple)
_write_table(t, bad_apple_path)
if not use_legacy_dataset:
# TODO(dataset) Dataset API skips bad files
return
bad_meta = pq.read_metadata(bad_apple_path)
with pytest.raises(ValueError):
read_multiple_files(paths + [bad_apple_path])
with pytest.raises(ValueError):
read_multiple_files(paths, metadata=bad_meta)
mixed_paths = [bad_apple_path, paths[0]]
with pytest.raises(ValueError):
read_multiple_files(mixed_paths, schema=bad_meta.schema)
with pytest.raises(ValueError):
read_multiple_files(mixed_paths)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_read_pandas(tempdir, use_legacy_dataset):
nfiles = 5
size = 5
dirpath = tempdir / guid()
dirpath.mkdir()
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = np.arange(i * size, (i + 1) * size)
df.index.name = 'index'
path = dirpath / '{}.parquet'.format(i)
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
expected = pd.concat([x[columns] for x in frames])
tm.assert_frame_equal(result, expected)
# also be able to pass the columns as a set (ARROW-12314)
result = dataset.read_pandas(columns=set(columns)).to_pandas()
assert result.shape == expected.shape
# column order can be different because of using a set
tm.assert_frame_equal(result.reindex(columns=expected.columns), expected)
@pytest.mark.filterwarnings("ignore:'ParquetDataset:DeprecationWarning")
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_memory_map(tempdir, use_legacy_dataset):
# ARROW-2627: Check that we can use ParquetDataset with memory-mapping
dirpath = tempdir / guid()
dirpath.mkdir()
df = _test_dataframe(10, seed=0)
path = dirpath / '{}.parquet'.format(0)
table = pa.Table.from_pandas(df)
_write_table(table, path, version='2.6')
dataset = pq.ParquetDataset(
dirpath, memory_map=True, use_legacy_dataset=use_legacy_dataset)
assert dataset.read().equals(table)
if use_legacy_dataset:
assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_enable_buffered_stream(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
df = _test_dataframe(10, seed=0)
path = dirpath / '{}.parquet'.format(0)
table = pa.Table.from_pandas(df)
_write_table(table, path, version='2.6')
with pytest.raises(ValueError):
pq.ParquetDataset(
dirpath, buffer_size=-64,
use_legacy_dataset=use_legacy_dataset)
for buffer_size in [128, 1024]:
dataset = pq.ParquetDataset(
dirpath, buffer_size=buffer_size,
use_legacy_dataset=use_legacy_dataset)
assert dataset.read().equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_enable_pre_buffer(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
df = _test_dataframe(10, seed=0)
path = dirpath / '{}.parquet'.format(0)
table = pa.Table.from_pandas(df)
_write_table(table, path, version='2.6')
for pre_buffer in (True, False):
dataset = pq.ParquetDataset(
dirpath, pre_buffer=pre_buffer,
use_legacy_dataset=use_legacy_dataset)
assert dataset.read().equals(table)
actual = pq.read_table(dirpath, pre_buffer=pre_buffer,
use_legacy_dataset=use_legacy_dataset)
assert actual.equals(table)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(file_nrows, seed=i)
path = base_path / '{}.parquet'.format(i)
test_data.append(_write_table(df, path))
paths.append(path)
return paths
def _assert_dataset_paths(dataset, paths, use_legacy_dataset):
if use_legacy_dataset:
assert set(map(str, paths)) == {x.path for x in dataset._pieces}
else:
paths = [str(path.as_posix()) for path in paths]
assert set(paths) == set(dataset._dataset.files)
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.parametrize('dir_prefix', ['_', '.'])
def test_ignore_private_directories(tempdir, dir_prefix, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
# private directory
(dirpath / '{}staging'.format(dir_prefix)).mkdir()
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_ignore_hidden_files_dot(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
with (dirpath / '.DS_Store').open('wb') as f:
f.write(b'gibberish')
with (dirpath / '.private').open('wb') as f:
f.write(b'gibberish')
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_ignore_hidden_files_underscore(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
with (dirpath / '_committed_123').open('wb') as f:
f.write(b'abcd')
with (dirpath / '_started_321').open('wb') as f:
f.write(b'abcd')
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.parametrize('dir_prefix', ['_', '.'])
def test_ignore_no_private_directories_in_base_path(
tempdir, dir_prefix, use_legacy_dataset
):
# ARROW-8427 - don't ignore explicitly listed files if parent directory
# is a private directory
dirpath = tempdir / "{0}data".format(dir_prefix) / guid()
dirpath.mkdir(parents=True)
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
dataset = pq.ParquetDataset(paths, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
# ARROW-9644 - don't ignore full directory with underscore in base path
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_ignore_custom_prefixes(tempdir, use_legacy_dataset):
# ARROW-9573 - allow override of default ignore_prefixes
part = ["xxx"] * 3 + ["yyy"] * 3
table = pa.table([
pa.array(range(len(part))),
pa.array(part).dictionary_encode(),
], names=['index', '_part'])
# TODO use_legacy_dataset ARROW-10247
pq.write_to_dataset(table, str(tempdir), partition_cols=['_part'])
private_duplicate = tempdir / '_private_duplicate'
private_duplicate.mkdir()
pq.write_to_dataset(table, str(private_duplicate),
partition_cols=['_part'])
read = pq.read_table(
tempdir, use_legacy_dataset=use_legacy_dataset,
ignore_prefixes=['_private'])
assert read.equals(table)
@parametrize_legacy_dataset_fixed
def test_empty_directory(tempdir, use_legacy_dataset):
# ARROW-5310 - reading empty directory
# fails with legacy implementation
empty_dir = tempdir / 'dataset'
empty_dir.mkdir()
dataset = pq.ParquetDataset(
empty_dir, use_legacy_dataset=use_legacy_dataset)
result = dataset.read()
assert result.num_rows == 0
assert result.num_columns == 0
def _test_write_to_dataset_with_partitions(base_path,
use_legacy_dataset=True,
filesystem=None,
schema=None,
index_name=None):
import pandas as pd
import pandas.testing as tm
import pyarrow.parquet as pq
# ARROW-1400
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'nan': [np.nan] * 10,
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
cols = output_df.columns.tolist()
partition_by = ['group1', 'group2']
output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
preserve_index=False)
pq.write_to_dataset(output_table, base_path, partition_by,
filesystem=filesystem,
use_legacy_dataset=use_legacy_dataset)
metadata_path = os.path.join(str(base_path), '_common_metadata')
if filesystem is not None:
with filesystem.open(metadata_path, 'wb') as f:
pq.write_metadata(output_table.schema, f)
else:
pq.write_metadata(output_table.schema, metadata_path)
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
dataset = pq.ParquetDataset(base_path,
filesystem=filesystem,
validate_schema=True,
use_legacy_dataset=use_legacy_dataset)
# ARROW-2209: Ensure the dataset schema also includes the partition columns
if use_legacy_dataset:
dataset_cols = set(dataset.schema.to_arrow_schema().names)
else:
# NB: the schema property is an Arrow schema, not a Parquet schema
dataset_cols = set(dataset.schema.names)
assert dataset_cols == set(output_table.schema.names)
input_table = dataset.read()
input_df = input_table.to_pandas()
# Read data back in and compare with original DataFrame
# Partitioned columns added to the end of the DataFrame when read
input_df_cols = input_df.columns.tolist()
assert partition_by == input_df_cols[-1 * len(partition_by):]
input_df = input_df[cols]
# Partitioned columns become 'categorical' dtypes
for col in partition_by:
output_df[col] = output_df[col].astype('category')
tm.assert_frame_equal(output_df, input_df)
def _test_write_to_dataset_no_partitions(base_path,
use_legacy_dataset=True,
filesystem=None):
import pandas as pd
import pyarrow.parquet as pq
# ARROW-1400
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
cols = output_df.columns.tolist()
output_table = pa.Table.from_pandas(output_df)
if filesystem is None:
filesystem = LocalFileSystem._get_instance()
# Without partitions, append files to root_path
n = 5
for i in range(n):
pq.write_to_dataset(output_table, base_path,
filesystem=filesystem)
output_files = [file for file in filesystem.ls(str(base_path))
if file.endswith(".parquet")]
assert len(output_files) == n
# Deduplicated incoming DataFrame should match
# original outgoing DataFrame
input_table = pq.ParquetDataset(
base_path, filesystem=filesystem,
use_legacy_dataset=use_legacy_dataset
).read()
input_df = input_table.to_pandas()
input_df = input_df.drop_duplicates()
input_df = input_df[cols]
assert output_df.equals(input_df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions(tempdir, use_legacy_dataset):
_test_write_to_dataset_with_partitions(str(tempdir), use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_and_schema(
tempdir, use_legacy_dataset
):
schema = pa.schema([pa.field('group1', type=pa.string()),
pa.field('group2', type=pa.string()),
pa.field('num', type=pa.int64()),
pa.field('nan', type=pa.int32()),
pa.field('date', type=pa.timestamp(unit='us'))])
_test_write_to_dataset_with_partitions(
str(tempdir), use_legacy_dataset, schema=schema)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_and_index_name(
tempdir, use_legacy_dataset
):
_test_write_to_dataset_with_partitions(
str(tempdir), use_legacy_dataset, index_name='index_name')
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_no_partitions(tempdir, use_legacy_dataset):
_test_write_to_dataset_no_partitions(str(tempdir), use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_pathlib(tempdir, use_legacy_dataset):
_test_write_to_dataset_with_partitions(
tempdir / "test1", use_legacy_dataset)
_test_write_to_dataset_no_partitions(
tempdir / "test2", use_legacy_dataset)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_write_to_dataset_pathlib_nonlocal(
tempdir, s3_example_s3fs, use_legacy_dataset
):
# pathlib paths are only accepted for local files
fs, _ = s3_example_s3fs
with pytest.raises(TypeError, match="path-like objects are only allowed"):
_test_write_to_dataset_with_partitions(
tempdir / "test1", use_legacy_dataset, filesystem=fs)
with pytest.raises(TypeError, match="path-like objects are only allowed"):
_test_write_to_dataset_no_partitions(
tempdir / "test2", use_legacy_dataset, filesystem=fs)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_s3fs(
s3_example_s3fs, use_legacy_dataset
):
fs, path = s3_example_s3fs
_test_write_to_dataset_with_partitions(
path, use_legacy_dataset, filesystem=fs)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_write_to_dataset_no_partitions_s3fs(
s3_example_s3fs, use_legacy_dataset
):
fs, path = s3_example_s3fs
_test_write_to_dataset_no_partitions(
path, use_legacy_dataset, filesystem=fs)
@pytest.mark.filterwarnings("ignore:'ParquetDataset:DeprecationWarning")
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_write_to_dataset_with_partitions_and_custom_filenames(
tempdir, use_legacy_dataset
):
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'nan': [np.nan] * 10,
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
partition_by = ['group1', 'group2']
output_table = pa.Table.from_pandas(output_df)
path = str(tempdir)
def partition_filename_callback(keys):
return "{}-{}.parquet".format(*keys)
pq.write_to_dataset(output_table, path,
partition_by, partition_filename_callback,
use_legacy_dataset=use_legacy_dataset)
dataset = pq.ParquetDataset(path)
# ARROW-3538: Ensure partition filenames match the given pattern
# defined in the local function partition_filename_callback
expected_basenames = [
'a-e.parquet', 'a-f.parquet',
'b-e.parquet', 'b-f.parquet',
'b-g.parquet', 'c-e.parquet'
]
output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.dataset
@pytest.mark.pandas
def test_write_to_dataset_filesystem(tempdir):
df = pd.DataFrame({'A': [1, 2, 3]})
table = pa.Table.from_pandas(df)
path = str(tempdir)
pq.write_to_dataset(table, path, filesystem=fs.LocalFileSystem())
result = pq.read_table(path)
assert result.equals(table)
# TODO(dataset) support pickling
def _make_dataset_for_pickling(tempdir, N=100):
path = tempdir / 'data.parquet'
fs = LocalFileSystem._get_instance()
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
table = pa.Table.from_pandas(df)
num_groups = 3
with pq.ParquetWriter(path, table.schema) as writer:
for i in range(num_groups):
writer.write_table(table)
reader = pq.ParquetFile(path)
assert reader.metadata.num_row_groups == num_groups
metadata_path = tempdir / '_metadata'
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(tempdir, filesystem=fs)
assert dataset.metadata_path == str(metadata_path)
return dataset
def _assert_dataset_is_picklable(dataset, pickler):
def is_pickleable(obj):
return obj == pickler.loads(pickler.dumps(obj))
assert is_pickleable(dataset)
assert is_pickleable(dataset.metadata)
assert is_pickleable(dataset.metadata.schema)
assert len(dataset.metadata.schema)
for column in dataset.metadata.schema:
assert is_pickleable(column)
for piece in dataset._pieces:
assert is_pickleable(piece)
metadata = piece.get_metadata()
assert metadata.num_row_groups
for i in range(metadata.num_row_groups):
assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_builtin_pickle_dataset(tempdir, datadir):
import pickle
dataset = _make_dataset_for_pickling(tempdir)
_assert_dataset_is_picklable(dataset, pickler=pickle)
@pytest.mark.pandas
def test_cloudpickle_dataset(tempdir, datadir):
cp = pytest.importorskip('cloudpickle')
dataset = _make_dataset_for_pickling(tempdir)
_assert_dataset_is_picklable(dataset, pickler=cp)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_partitioned_dataset(tempdir, use_legacy_dataset):
# ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
# to a Parquet file
path = tempdir / "ARROW-3208"
df = pd.DataFrame({
'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
'two': [-1, 10, 2, 100, 1000, 1, 11],
'three': [0, 0, 0, 0, 0, 0, 0]
})
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=str(path),
partition_cols=['one', 'two'])
table = pq.ParquetDataset(
path, use_legacy_dataset=use_legacy_dataset).read()
pq.write_table(table, path / "output.parquet")
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_read_dictionary(tempdir, use_legacy_dataset):
path = tempdir / "ARROW-3325-dataset"
t1 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0'])
t2 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0'])
# TODO pass use_legacy_dataset (need to fix unique names)
pq.write_to_dataset(t1, root_path=str(path))
pq.write_to_dataset(t2, root_path=str(path))
result = pq.ParquetDataset(
path, read_dictionary=['f0'],
use_legacy_dataset=use_legacy_dataset).read()
# The order of the chunks is non-deterministic
ex_chunks = [t1[0].chunk(0).dictionary_encode(),
t2[0].chunk(0).dictionary_encode()]
assert result[0].num_chunks == 2
c0, c1 = result[0].chunk(0), result[0].chunk(1)
if c0.equals(ex_chunks[0]):
assert c1.equals(ex_chunks[1])
else:
assert c0.equals(ex_chunks[1])
assert c1.equals(ex_chunks[0])
@pytest.mark.dataset
def test_dataset_unsupported_keywords():
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, schema=pa.schema([]))
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, metadata=pa.schema([]))
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, validate_schema=False)
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, split_row_groups=True)
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, metadata_nthreads=4)
with pytest.raises(ValueError, match="no longer supported"):
pq.read_table("", use_legacy_dataset=False, metadata=pa.schema([]))
@pytest.mark.dataset
def test_dataset_partitioning(tempdir):
import pyarrow.dataset as ds
# create small dataset with directory partitioning
root_path = tempdir / "test_partitioning"
(root_path / "2012" / "10" / "01").mkdir(parents=True)
table = pa.table({'a': [1, 2, 3]})
pq.write_table(
table, str(root_path / "2012" / "10" / "01" / "data.parquet"))
# This works with new dataset API
# read_table
part = ds.partitioning(field_names=["year", "month", "day"])
result = pq.read_table(
str(root_path), partitioning=part, use_legacy_dataset=False)
assert result.column_names == ["a", "year", "month", "day"]
result = pq.ParquetDataset(
str(root_path), partitioning=part, use_legacy_dataset=False).read()
assert result.column_names == ["a", "year", "month", "day"]
# This raises an error for legacy dataset
with pytest.raises(ValueError):
pq.read_table(
str(root_path), partitioning=part, use_legacy_dataset=True)
with pytest.raises(ValueError):
pq.ParquetDataset(
str(root_path), partitioning=part, use_legacy_dataset=True)
@pytest.mark.dataset
def test_parquet_dataset_new_filesystem(tempdir):
# Ensure we can pass new FileSystem object to ParquetDataset
# (use new implementation automatically without specifying
# use_legacy_dataset=False)
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, tempdir / 'data.parquet')
# don't use simple LocalFileSystem (as that gets mapped to legacy one)
filesystem = fs.SubTreeFileSystem(str(tempdir), fs.LocalFileSystem())
dataset = pq.ParquetDataset('.', filesystem=filesystem)
result = dataset.read()
assert result.equals(table)
@pytest.mark.filterwarnings("ignore:'ParquetDataset:DeprecationWarning")
def test_parquet_dataset_partitions_piece_path_with_fsspec(tempdir):
# ARROW-10462 ensure that on Windows we properly use posix-style paths
# as used by fsspec
fsspec = pytest.importorskip("fsspec")
filesystem = fsspec.filesystem('file')
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, tempdir / 'data.parquet')
# pass a posix-style path (using "/" also on Windows)
path = str(tempdir).replace("\\", "/")
dataset = pq.ParquetDataset(path, filesystem=filesystem)
# ensure the piece path is also posix-style
expected = path + "/data.parquet"
assert dataset.pieces[0].path == expected
@pytest.mark.dataset
def test_parquet_dataset_deprecated_properties(tempdir):
table = pa.table({'a': [1, 2, 3]})
path = tempdir / 'data.parquet'
pq.write_table(table, path)
dataset = pq.ParquetDataset(path)
with pytest.warns(DeprecationWarning, match="'ParquetDataset.pieces"):
dataset.pieces
with pytest.warns(DeprecationWarning, match="'ParquetDataset.partitions"):
dataset.partitions
with pytest.warns(DeprecationWarning, match="'ParquetDataset.memory_map"):
dataset.memory_map
with pytest.warns(DeprecationWarning, match="'ParquetDataset.read_dictio"):
dataset.read_dictionary
with pytest.warns(DeprecationWarning, match="'ParquetDataset.buffer_size"):
dataset.buffer_size
with pytest.warns(DeprecationWarning, match="'ParquetDataset.fs"):
dataset.fs
dataset2 = pq.ParquetDataset(path, use_legacy_dataset=False)
with pytest.warns(DeprecationWarning, match="'ParquetDataset.pieces"):
dataset2.pieces
|
|
""" A wxPython based color gradient editor for vtkLookupTables and
color transfer functions.
This code is distributed under the conditions of the BSD license.
Based on a Tk version of this widget by Gerald Knizia <cgk.d@gmx.net>
Ported to wxPython by Pete Schmitt <schmitt@colorado.edu>
Cleaned up and enhanced for use with MayaVi2 by Prabhu Ramachandran
Copyright (c) 2005-2015, Gerald Knizia, Pete Schmitt and Prabhu Ramachandran
"""
# Third-party imports
import wx
# Local imports
from .gradient_editor import (ColorControlPoint, ChannelBase, FunctionControl,
GradientEditorWidget)
##########################################################################
# `wxGradientControl` class.
##########################################################################
class wxGradientControl(wx.Panel):
"""Widget which displays the gradient represented by an GradientTable
object (and does nothing beyond that)"""
def __init__(self, masterPanel, gradient_table, width, height ):
"""master: panel in which to place the control. GradientTable is the
Table to which to attach."""
wx.Panel.__init__(self, masterPanel, size=wx.Size(width, height),
style=wx.RAISED_BORDER,
name="Colormap Panel")
self.SetBackgroundColour(wx.Colour(255,255,255))
self.width = width
self.height = height
self.gradient_table = gradient_table
assert( gradient_table.size == width )
# ^- currently only able to use gradient tables of the same size as the canvas width
# bind paint event to redraw when resizing/creating window...
wx.EVT_PAINT(self, self.OnPaint)
def OnPaint(self, event):
""" Paint event handler for when the window is resized and
whatnot."""
dc = wx.PaintDC(self)
self.update()
def update(self):
"""Repaint the control."""
#self.canvas.delete(tk.ALL) # clears all lines contained.
dc = wx.ClientDC(self)
dc.SetBackground(wx.Brush(wx.Colour(0,0,0), wx.SOLID))
dc.Clear()
width, height = self.GetSize()
# From the old tk GradientEditor:
# a look around the web (http://wiki.tcl.tk/11868) told me that
# using the PhotoImage tk-control would not be a good idea and
# that line objects work faster. While I doubt this is an optimal
# solution it currently works fast enough.
# So... let's do the same thing for the new and improved (?) wxPython GradientEditor.
xform = self.gradient_table.scaling_function
start_y = 0
end_y = height
if xform:
# if a scaling transformation is provided, paint the original
# gradient under the scaled gradient.
start_y = height/2
# paint the original gradient as it stands in the table.
dc.BeginDrawing()
for x in range(width):
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(float(x)/(width-1))
dc.SetPen(wx.Pen(wx.Colour(int(255*r),int(255*g),int(255*b))))
dc.SetBrush(wx.Brush((int(255*r),int(255*g),int(255*b)), wx.SOLID))
dc.DrawLine(x, start_y, x, end_y)
if xform:
# paint the scaled gradient in the top half (above the original)
end_y = start_y
start_y = 0
for x in range(width):
f = float(x)/(width-1)
(r,g,b,a) = self.gradient_table.get_pos_rgba_color_lerped(xform(f))
dc.SetBrush(wx.Brush((int(255*r),int(255*g),int(255*b)), wx.SOLID))
dc.DrawLine(x, start_y, x, end_y)
dc.EndDrawing()
##########################################################################
# `Channel` class.
##########################################################################
class Channel(ChannelBase):
def paint(self, deviceContext):
"""Paint current channel into Canvas (a canvas of a function control
object).
Contents of the canvas are not deleted prior to painting,
so more than one channel can be painted into the same canvas."""
dc = deviceContext
table = self.control.table
# only control points which are active for the current channel
# are to be painted. filter them out.
relevant_control_points = [
x for x in table.control_points if self.name in x.active_channels
]
dc.BeginDrawing()
# lines between control points
dc.SetPen(wx.Pen(self.rgb_color,1))
#dc.SetBrush(wx.Brush((255,255,255), wx.SOLID))
dc.SetBrush(wx.Brush((255,255,255), wx.SOLID))
for k in range( len(relevant_control_points) - 1 ):
cur_point = relevant_control_points[k]
next_point = relevant_control_points[1+k]
dc.DrawLine( self.get_pos_index(cur_point.pos),
self.get_value_index(cur_point.color),
self.get_pos_index(next_point.pos),
self.get_value_index(next_point.color))
# the control points themselves.
dc.SetPen(wx.Pen("BLACK",1))
dc.SetBrush(wx.Brush((255,255,255), wx.SOLID))
for control_point in relevant_control_points:
x = self.get_pos_index( control_point.pos )
y = self.get_value_index( control_point.color )
radius=6
#print(x,y)
dc.DrawRectangle(x-(radius/2.0), y-(radius/2.0),radius,radius)
dc.DrawRectangle(100,80,6,6)
dc.EndDrawing()
##########################################################################
# `wxFunctionControl` class.
##########################################################################
class wxFunctionControl(wx.Panel, FunctionControl):
"""Widget which displays a rectangular regions on which hue, sat, val
or rgb values can be modified. An function control can have one or more
attached color channels."""
# Radius around a control point center in which we'd still count a
# click as "clicked the control point"
control_pt_click_tolerance = 4
ChannelFactory = Channel
def __init__(self, master, gradient_table, color_space, width, height):
"""Initialize a function control widget on tkframe master.
Parameters:
-----------
master: The master widget. Note that this widget *must* have
the methods specified in the `AbstractGradientEditorWidget`
interface.
on_table_changed: Callback function taking a bool argument meaning
'FinalUpdate'. FinalUpdate is true if a control point is dropped,
created or removed, and false if the update is due to a control point
currently being dragged (but not yet dropped).
color_space: String which specifies the channels painted on this control.
May be any combination of h,s,v,r,g,b,a in which each channel
occurs only once.
set_status_text: a callback used to set the status text
when using the editor.
"""
FunctionControl.__init__(self, master, gradient_table, color_space,
width, height)
wx.Panel.__init__(self, master, size=wx.Size(width, height),
name="RGBHSVA Editor")
self.update()
wx.EVT_LEFT_DOWN(self, self.on_left_button_down)
wx.EVT_LEFT_UP(self, self.on_left_button_up)
wx.EVT_RIGHT_DOWN(self, self.on_right_button_down)
wx.EVT_RIGHT_UP(self, self.on_right_button_up)
wx.EVT_MOTION(self, self.on_mouse_move)
wx.EVT_PAINT(self, self.on_paint)
wx.EVT_LEAVE_WINDOW(self, self.on_leave_window)
######################################################################
# wxPython event methods.
######################################################################
def update(self, event = None):
"""Repaint the control."""
dc = wx.ClientDC(self)
#if we have a custom background, we *must* set the background brush *BEFORE* clearing...
dc.SetBackground(wx.Brush(wx.Colour(255,255,255), wx.SOLID))
dc.Clear()
for channel in self.channels:
channel.paint(dc)
def on_paint(self, event=None):
dc = wx.PaintDC(self)
self.update()
def on_left_button_down(self, event):
self.cur_drag = self.find_control_point( event.GetX(), event.GetY() )
def on_left_button_up(self, event):
if self.cur_drag:
self.table_config_changed( final_update = True )
self.cur_drag = None
def on_leave_window(self, event):
self.on_left_button_up(event)
def on_right_button_down(self, event):
pass
def on_right_button_up(self, event):
# toggle control point. check if there is a control point
# under the mouse. If yes, delete it, if not, create one
# at that point.
cur_control_point = self.find_control_point(event.GetX(), None)
if cur_control_point:
# found a marker at the click position. delete it and return,
# unless it is a fixed marker (at pos 0 or 1).
if ( cur_control_point[1].fixed ):
# in this case do nothing. Fixed markers cannot be deleted.
return
self.table.control_points.remove(cur_control_point[1])
self.table_config_changed(final_update=True)
else:
# since there was no marker to remove at the point, we assume
# that we should place one there
new_control_point = ColorControlPoint(active_channels = self.active_channels_string)
new_control_point.set_pos(self.channels[0].get_index_pos(event.GetX()))
# set new control point color to the color currently present
# at its designated position
new_control_point.color = self.table.get_pos_color(new_control_point.pos)
self.table.insert_control_point( new_control_point )
self.table_config_changed( final_update = True )
def on_mouse_move(self, event):
# currently dragging a control point?
channel = None
point = None
if self.cur_drag:
channel = self.cur_drag[0]
point = self.cur_drag[1]
if ( not point.fixed ):
point.set_pos( channel.get_index_pos(event.GetX()) )
point.activate_channels( self.active_channels_string )
self.table.sort_control_points()
channel.set_value_index( point.color, event.GetY() )
self.table_config_changed( final_update = False )
screenX = event.GetX()
screenY = event.GetY()
width, height = self.GetSize()
master = self.master
s1, s2 = master.get_table_range()
if channel is not None:
name = self.text_map[channel.name]
pos = s1 + (s2 - s1)*point.pos
val = channel.get_value(point.color)
txt = '%s: (%.3f, %.3f)'%(name, pos, val)
else:
x = s1 + (s2 - s1)*float(screenX)/(width-1)
y = 1.0 - float(screenY)/(height-1)
txt = "position: (%.3f, %.3f)"%(x, y)
self.master.set_status_text(txt)
##########################################################################
# `wxGradientEditorWidget` class.
##########################################################################
class wxGradientEditorWidget(wx.Panel, GradientEditorWidget):
"""A Gradient Editor widget that can be used anywhere.
"""
def __init__(self, master, vtk_table, on_change_color_table=None,
colors=None):
"""
Parameters:
-----------
vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object
to set.
on_change_color_table : A callback called when the color table
changes.
colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a'
(Default : ['rgb', 'hsv', 'a'])
'rgb' creates one panel to edit Red, Green and Blue
colors.
'hsv' creates one panel to edit Hue, Saturation and
Value.
'h', 's', 'v', 'r', 'g', 'b', 'a' separately
specified creates different panels for each.
"""
GradientEditorWidget.__init__(self, master, vtk_table,
on_change_color_table, colors)
wx.Panel.__init__(self, master)
gradient_preview_width = self.gradient_preview_width
gradient_preview_height = self.gradient_preview_height
channel_function_width = self.channel_function_width
channel_function_height = self.channel_function_height
# set up all the panels in a gridbagsizer (i.e. a big grid)
# 6x2 size: 6 rows, 2 columns...
sizer = wx.GridBagSizer(2, 2)
# "Gradient Viewer" panel, in position (0,1) for sizer
self.gradient_control = wxGradientControl(self,
self.gradient_table,
gradient_preview_width,
gradient_preview_height)
tt = wx.ToolTip('Right click for menu')
self.gradient_control.Bind(wx.EVT_CONTEXT_MENU, self.on_gradient_menu)
self.gradient_control.SetToolTip(tt)
sizer.Add(self.gradient_control, pos=(0,1))
# Add the function controls:
function_controls = self.function_controls
editor_data = self.editor_data
row = 1
for color in self.colors:
data = editor_data[color]
control = wxFunctionControl(self, self.gradient_table, color,
channel_function_width,
channel_function_height)
txt = data[0] + self.tooltip_text
control.SetToolTip(wx.ToolTip(txt))
# Add name of editor (to left side of editor)
sizer.Add(wx.StaticText(self, -1, data[1]), pos=(row, 0),
flag=wx.ALIGN_CENTER|wx.ALL)
# Add the "RGB" control point editor
sizer.Add(control, pos=(row, 1))
function_controls.append(control)
row += 1
# The status text.
self.text = wx.StaticText(self, -1, 'status')
sizer.Add(self.text, (row,0), (row,2))
row += 1
# set the appropriate sizer.
sizer.SetSizeHints(self)
self.SetSizerAndFit(sizer)
######################################################################
# `wxGradientEditorWidget` interface.
######################################################################
def set_status_text(self, msg):
t = self.text
t.SetLabel(msg)
t.Refresh()
t.Update()
######################################################################
# wxPython event methods.
######################################################################
def on_gradient_menu(self, event):
if not hasattr(self, 'save_menuid'):
# Do this only the first time.
self.save_menuid = wx.NewId()
self.load_menuid = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_save, id=self.save_menuid)
self.Bind(wx.EVT_MENU, self.on_load, id=self.load_menuid)
menu = wx.Menu()
menu.Append(self.save_menuid, "Save as")
menu.Append(self.load_menuid, "Load")
self.PopupMenu(menu)
menu.Destroy()
def on_save(self, event):
"""
Open "Save" dialog, write lookuptable to 3 files: ``*.lut``
(lookuptable) ``*.grad`` (gradient table for use with this program),
and ``*.jpg`` (image of the gradient)
"""
dlg = wx.FileDialog(self, "Save LUT to...", style=wx.SAVE)
wildcard = "Gradient Files (.grad)|*.grad|" \
"All files (*.*)|*.*"
dlg.SetWildcard(wildcard)
if (dlg.ShowModal() == wx.ID_OK):
file_name = dlg.GetPath()
if file_name:
self.save(file_name)
def on_load(self, event):
"""
Load a ``*.grad`` lookup table file using a wxPython dialog.
"""
style = wx.OPEN | wx.HIDE_READONLY
dlg = wx.FileDialog(self, "Open a file", style=style)
wildcard = "Gradient Files (.grad)|*.grad|" \
"All files (*.*)|*.*"
dlg.SetWildcard(wildcard)
if (dlg.ShowModal() == wx.ID_OK):
file_name = dlg.GetPath()
if file_name:
self.load(file_name)
##########################################################################
# `wxGradientEditor` class.
##########################################################################
class wxGradientEditor(wx.Frame):
""" wxPython frame that displays the gradient editor window,
i.e. the thing that contains the gradient display, the function
controls and the buttons.
"""
def __init__(self, vtk_table, on_change_color_table = None, colors=None):
"""Initialize the gradient editor window.
Parameters
----------
vtk_table: Instance of vtkLookupTable, designating the table which is
to be edited.
on_change_color_table: Callback function taking no arguments. Called
when the color table was changed and rendering is
requested.
"""
wx.Frame.__init__(self, None, -1, "Color Gradient Editor",
wx.DefaultPosition, [350, 400])
self.widget = wxGradientEditorWidget(self, vtk_table,
on_change_color_table,
colors)
# draw the rest of the GUI (i.e. statusbar, menubar, etc.)
self.SetupMenuBar()
self.CreateStatusBar()
def SetupMenuBar(self):
"""
Create menus (i.e. the File menu and its submenus, the Help menu, ...)
"""
## Set up the MenuBar
MenuBar = wx.MenuBar()
#FILE Menu....
file_menu = wx.Menu()
item = file_menu.Append(-1, "&Save","Save CTF")
self.Bind(wx.EVT_MENU, self.widget.on_save, item)
item = file_menu.Append(-1, "&Load","Load CTF")
self.Bind(wx.EVT_MENU, self.widget.on_load, item)
item = file_menu.Append(-1, "&Close","Close this frame")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
MenuBar.Append(file_menu, "&File")
help_menu = wx.Menu()
item = help_menu.Append(-1, "&Help", "Help")
self.Bind(wx.EVT_MENU, self.OnHelp, item)
item = help_menu.Append(-1, "&About", "About")
self.Bind(wx.EVT_MENU, self.OnAbout, item)
MenuBar.Append(help_menu, "&Help")
self.SetMenuBar(MenuBar)
def OnQuit(self, event):
self.Close()
def OnHelp(self, event):
""" Help defining the mouse interactions """
message = "Right click to add control points. Left click to move control points"
dlg = wx.MessageDialog(self, message,
'About wxGradientEditor',
wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
def OnAbout(self, event):
""" Who wrote the program?"""
message = 'tk Gradient Editor for MayaVi1: Gerald Knizia (cgk.d@gmx.net)\n'\
'wxPython port: Pete Schmitt (schmitt@colorado.edu)\n'\
'Enhanced for MayaVi2: Prabhu Ramachandran'
dlg = wx.MessageDialog(self, message,
'About wxGradientEditor',
wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
##########################################################################
# Test application.
##########################################################################
def main():
from .traitsui_gradient_editor import make_test_table
table, ctf, otf = make_test_table(lut=False)
# the actual gradient editor code.
def on_color_table_changed():
"""If we had a vtk window running, update it here"""
print("Update Render Window")
app = wx.PySimpleApp()
editor = wxGradientEditor(table,
on_color_table_changed,
colors=['rgb', 'a', 'h', 's', 'v'],
)
editor.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
|
import uuid
from django.conf import settings
from django.db import transaction
from django.forms import ValidationError
from django.utils.translation import gettext
import waffle
from celery import chain, chord
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import amo, core
from olympia.amo.urlresolvers import linkify_and_clean
from olympia.files.models import File, FileUpload
from olympia.files.tasks import repack_fileupload
from olympia.files.utils import parse_addon, parse_xpi
from olympia.scanners.tasks import run_customs, run_wat, run_yara, call_mad_api
from olympia.translations.models import Translation
from olympia.versions.models import Version
from olympia.versions.utils import process_color_value
from . import tasks
log = olympia.core.logger.getLogger('z.devhub')
def process_validation(validation, file_hash=None, channel=amo.RELEASE_CHANNEL_LISTED):
"""Process validation results into the format expected by the web
frontend, including transforming certain fields into HTML, mangling
compatibility messages, and limiting the number of messages displayed."""
validation = fix_addons_linter_output(validation, channel=channel)
# Set an ending tier if we don't have one (which probably means
# we're dealing with mock validation results or the addons-linter).
validation.setdefault('ending_tier', 0)
if not validation['ending_tier'] and validation['messages']:
validation['ending_tier'] = max(
msg.get('tier', -1) for msg in validation['messages']
)
limit_validation_results(validation)
htmlify_validation(validation)
return validation
def limit_validation_results(validation):
"""Limit the number of messages displayed in a set of validation results,
and if truncation has occurred, add a new message explaining so."""
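# Rough illustration (assuming settings.VALIDATOR_MESSAGE_LIMIT == 2):
# given messages of types ['notice', 'error', 'warning'], the two most
# severe ('error', 'warning') are kept, and a truncation message whose type
# matches the most severe message present is inserted at position 0, so
# clients still see the worst issues first.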
messages = validation['messages']
lim = settings.VALIDATOR_MESSAGE_LIMIT
if lim and len(messages) > lim:
# Sort messages by severity first so that the most important messages
# are the ones we keep.
TYPES = {'error': 0, 'warning': 2, 'notice': 3}
def message_key(message):
return TYPES.get(message.get('type'))
messages.sort(key=message_key)
leftover_count = len(messages) - lim
del messages[lim:]
# The type of the truncation message should be the type of the most
# severe message in the results.
if validation['errors']:
msg_type = 'error'
elif validation['warnings']:
msg_type = 'warning'
else:
msg_type = 'notice'
compat_type = (
msg_type if any(msg.get('compatibility_type') for msg in messages) else None
)
message = (
gettext(
'Validation generated too many errors/warnings so %s '
'messages were truncated. After addressing the visible '
"messages, you'll be able to see the others."
)
% leftover_count
)
messages.insert(
0,
{
'tier': 1,
'type': msg_type,
# To respect the message structure, see bug 1139674.
'id': ['validation', 'messages', 'truncated'],
'message': message,
'description': [],
'compatibility_type': compat_type,
},
)
def htmlify_validation(validation):
"""Process the `message` and `description` fields into
safe HTML, with URLs turned into links."""
for msg in validation['messages']:
msg['message'] = linkify_and_clean(msg['message'])
if 'description' in msg:
# Description may be returned as a single string, or list of
# strings. Turn it into lists for simplicity on the client side.
if not isinstance(msg['description'], (list, tuple)):
msg['description'] = [msg['description']]
msg['description'] = [
linkify_and_clean(text) for text in msg['description']
]
def fix_addons_linter_output(validation, channel):
"""Make sure the output from the addons-linter is the same as amo-validator
for backwards compatibility reasons."""
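# Sketch of the reshaping done below: the raw linter output keeps separate
# 'errors' / 'warnings' / 'notices' lists whose entries carry '_type' and
# 'code' keys; they are merged into a single 'messages' list where each
# entry gets 'type', 'id' (a one-element list), a synthetic 'uid' and a
# fixed 'tier' of 1, alongside summary counts and a metadata block.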
if 'messages' in validation:
# addons-linter doesn't contain this, return the original validation
# untouched
return validation
def _merged_messages():
for type_ in ('errors', 'notices', 'warnings'):
for msg in validation[type_]:
# FIXME: Remove `uid` once addons-linter generates it
msg['uid'] = uuid.uuid4().hex
msg['type'] = msg.pop('_type')
msg['id'] = [msg.pop('code')]
# We don't have the concept of tiers for the addons-linter
# currently
msg['tier'] = 1
yield msg
identified_files = {
name: {'path': path}
for name, path in validation['metadata'].get('jsLibs', {}).items()
}
# Essential metadata.
metadata = {
'listed': channel == amo.RELEASE_CHANNEL_LISTED,
'identified_files': identified_files,
}
# Add metadata already set by the linter.
metadata.update(validation.get('metadata', {}))
return {
'success': not validation['errors'],
'compatibility_summary': {
'warnings': 0,
'errors': 0,
'notices': 0,
},
'notices': validation['summary']['notices'],
'warnings': validation['summary']['warnings'],
'errors': validation['summary']['errors'],
'messages': list(_merged_messages()),
'metadata': metadata,
'ending_tier': 5,
}
class Validator:
"""
Class which handles creating or fetching validation results for File
and FileUpload instances.
It forwards the actual validation to `devhub.tasks:validate_upload`
and `devhub.tasks:validate_file` but implements shortcuts for
legacy add-ons and search plugins to avoid running the linter.
"""
def __init__(self, file_, addon=None, listed=None, final_task=None):
self.addon = addon
self.file = None
self.prev_file = None
if isinstance(file_, FileUpload):
assert listed is not None
channel = (
amo.RELEASE_CHANNEL_LISTED if listed else amo.RELEASE_CHANNEL_UNLISTED
)
is_mozilla_signed = False
# We're dealing with a bare file upload. Try to extract the
# metadata that we need to match it against a previous upload
# from the file itself.
try:
addon_data = parse_addon(file_, minimal=True)
is_mozilla_signed = addon_data.get('is_mozilla_signed_extension', False)
except ValidationError as form_error:
log.info(
'could not parse addon for upload {}: {}'.format(
file_.pk, form_error
)
)
addon_data = None
else:
file_.update(version=addon_data.get('version'))
assert not file_.validation
validation_tasks = self.create_file_upload_tasks(
upload_pk=file_.pk, channel=channel, is_mozilla_signed=is_mozilla_signed
)
elif isinstance(file_, File):
# The listed flag for a File object should always come from
# the status of its owner Addon. If the caller tries to override
# this, something is wrong.
assert listed is None
channel = file_.version.channel
is_mozilla_signed = file_.is_mozilla_signed_extension
self.file = file_
self.addon = self.file.version.addon
addon_data = {'guid': self.addon.guid, 'version': self.file.version.version}
validation_tasks = [
tasks.create_initial_validation_results.si(),
tasks.validate_file.s(file_.pk),
tasks.handle_file_validation_result.s(file_.pk),
]
else:
raise ValueError
if final_task:
validation_tasks.append(final_task)
self.task = chain(*validation_tasks)
# Create a cache key for the task, so multiple requests to validate the
# same object do not result in duplicate tasks.
opts = file_._meta
self.cache_key = 'validation-task:{}.{}:{}:{}'.format(
opts.app_label, opts.object_name, file_.pk, listed
)
def get_task(self):
"""Return task chain to execute to trigger validation."""
return self.task
def create_file_upload_tasks(self, upload_pk, channel, is_mozilla_signed):
"""
This method creates the validation chain used during the submission
process, combining tasks in parallel (chord) with tasks chained
together (where the output is used as input of the next task).
"""
tasks_in_parallel = [tasks.forward_linter_results.s(upload_pk)]
if waffle.switch_is_active('enable-yara'):
tasks_in_parallel.append(run_yara.s(upload_pk))
if waffle.switch_is_active('enable-customs'):
tasks_in_parallel.append(run_customs.s(upload_pk))
if waffle.switch_is_active('enable-wat'):
tasks_in_parallel.append(run_wat.s(upload_pk))
return [
tasks.create_initial_validation_results.si(),
repack_fileupload.s(upload_pk),
tasks.validate_upload.s(upload_pk, channel),
tasks.check_for_api_keys_in_file.s(upload_pk),
chord(tasks_in_parallel, call_mad_api.s(upload_pk)),
tasks.handle_upload_validation_result.s(
upload_pk, channel, is_mozilla_signed
),
]
def extract_theme_properties(addon, channel):
version = addon.find_latest_version(channel)
if not version:
return {}
try:
parsed_data = parse_xpi(
version.file.file_path, addon=addon, user=core.get_user()
)
except ValidationError:
# If we can't parse the existing manifest safely return.
return {}
theme_props = parsed_data.get('theme', {})
# pre-process colors to deprecated colors; strip spaces.
theme_props['colors'] = dict(
process_color_value(prop, color)
for prop, color in theme_props.get('colors', {}).items()
)
# upgrade manifest from deprecated headerURL to theme_frame
if 'headerURL' in theme_props.get('images', {}):
url = theme_props['images'].pop('headerURL')
theme_props['images']['theme_frame'] = url
return theme_props
def wizard_unsupported_properties(data, wizard_fields):
# collect any 'theme' level unsupported properties
unsupported = [key for key in data.keys() if key not in ['colors', 'images']]
# and any unsupported 'colors' properties
unsupported += [key for key in data.get('colors', {}) if key not in wizard_fields]
# and finally any 'images' properties (wizard only supports the background)
unsupported += [key for key in data.get('images', {}) if key != 'theme_frame']
return unsupported
def fetch_existing_translations_from_addon(addon, properties):
translation_ids_gen = (getattr(addon, prop + '_id', None) for prop in properties)
translation_ids = [id_ for id_ in translation_ids_gen if id_]
# Just get all the values together to make it simpler
return {str(value) for value in Translation.objects.filter(id__in=translation_ids)}
def add_manifest_version_error(validation):
mv = validation.get('metadata', {}).get('manifestVersion')
if (
mv != 3
or waffle.switch_is_active('enable-mv3-submissions')
or 'messages' not in validation
):
return
msg = gettext(
'Manifest V3 is currently not supported for upload. '
'{start_href}Read more about the support timeline{end_href}.'
)
url = 'https://blog.mozilla.org/addons/2021/05/27/manifest-v3-update/'
start_href = f'<a href="{url}" target="_blank" rel="noopener">'
new_error_message = msg.format(start_href=start_href, end_href='</a>')
for index, message in enumerate(validation['messages']):
if message.get('dataPath') == '/manifest_version':
# if we find the linter manifest_version=3 warning, replace it
validation['messages'][index]['message'] = new_error_message
break
else:
# otherwise insert a new error at the start of the errors
validation['messages'].insert(
0,
{
'type': 'error',
'message': new_error_message,
'tier': 1,
'fatal': True,
},
)
@transaction.atomic
def create_version_for_upload(addon, upload, channel, parsed_data=None):
fileupload_exists = addon.fileupload_set.filter(
created__gt=upload.created, version=upload.version
).exists()
version_exists = Version.unfiltered.filter(
addon=addon, version=upload.version
).exists()
if fileupload_exists or version_exists:
log.info(
'Skipping Version creation for {upload_uuid} that would '
'cause a duplicate version'.format(upload_uuid=upload.uuid)
)
return None
else:
log.info(
'Creating version for {upload_uuid} that passed '
'validation'.format(upload_uuid=upload.uuid)
)
# Note: if we somehow managed to get here with an invalid add-on,
# parse_addon() will raise ValidationError and the task will fail
# loudly in sentry.
if parsed_data is None:
parsed_data = parse_addon(upload, addon, user=upload.user)
new_addon = not Version.unfiltered.filter(addon=addon).exists()
version = Version.from_upload(
upload,
addon,
channel,
selected_apps=[x[0] for x in amo.APPS_CHOICES],
parsed_data=parsed_data,
)
channel_name = amo.CHANNEL_CHOICES_API[channel]
# This function is only called via the signing api flow
statsd.incr(
f'signing.submission.{"addon" if new_addon else "version"}.{channel_name}'
)
# The add-on's status will be STATUS_NULL when its first version is
# created because the version has no files when it gets added and it
# gets flagged as invalid. We need to manually set the status.
if addon.status == amo.STATUS_NULL and channel == amo.RELEASE_CHANNEL_LISTED:
addon.update(status=amo.STATUS_NOMINATED)
return version
|
|
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
import pytest
from sklearn.base import clone
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import check_scoring
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
def _assert_predictor_equal(gb_1, gb_2, X):
"""Assert that two HistGBM instances are identical."""
# Check identical nodes for each tree
for (pred_ith_1, pred_ith_2) in zip(gb_1._predictors, gb_2._predictors):
for (predictor_1, predictor_2) in zip(pred_ith_1, pred_ith_2):
assert_array_equal(predictor_1.nodes, predictor_2.nodes)
# Check identical predictions
assert_allclose(gb_1.predict(X), gb_2.predict(X))
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
def test_max_iter_with_warm_start_validation(GradientBoosting, X, y):
# Check that a ValueError is raised when the maximum number of iterations
# is smaller than the number of iterations from the previous fit when warm
# start is True.
estimator = GradientBoosting(max_iter=10, early_stopping=False,
warm_start=True)
estimator.fit(X, y)
estimator.set_params(max_iter=5)
err_msg = ('max_iter=5 must be larger than or equal to n_iter_=10 '
'when warm_start==True')
with pytest.raises(ValueError, match=err_msg):
estimator.fit(X, y)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
def test_warm_start_yields_identical_results(GradientBoosting, X, y):
# Make sure that fitting 50 iterations and then 25 with warm start is
# equivalent to fitting 75 iterations.
rng = 42
gb_warm_start = GradientBoosting(
n_iter_no_change=100, max_iter=50, random_state=rng, warm_start=True
)
gb_warm_start.fit(X, y).set_params(max_iter=75).fit(X, y)
gb_no_warm_start = GradientBoosting(
n_iter_no_change=100, max_iter=75, random_state=rng, warm_start=False
)
gb_no_warm_start.fit(X, y)
# Check that both predictors are equal
_assert_predictor_equal(gb_warm_start, gb_no_warm_start, X)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
def test_warm_start_max_depth(GradientBoosting, X, y):
    # Test that trees of different depths can be fitted in the same ensemble.
gb = GradientBoosting(max_iter=20, min_samples_leaf=1,
warm_start=True, max_depth=2, early_stopping=False)
gb.fit(X, y)
gb.set_params(max_iter=30, max_depth=3, n_iter_no_change=110)
gb.fit(X, y)
# First 20 trees have max_depth == 2
for i in range(20):
assert gb._predictors[i][0].get_max_depth() == 2
# Last 10 trees have max_depth == 3
for i in range(1, 11):
assert gb._predictors[-i][0].get_max_depth() == 3
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize('scoring', (None, 'loss'))
def test_warm_start_early_stopping(GradientBoosting, X, y, scoring):
# Make sure that early stopping occurs after a small number of iterations
# when fitting a second time with warm starting.
n_iter_no_change = 5
gb = GradientBoosting(
n_iter_no_change=n_iter_no_change, max_iter=10000, early_stopping=True,
random_state=42, warm_start=True, tol=1e-3, scoring=scoring,
)
gb.fit(X, y)
n_iter_first_fit = gb.n_iter_
gb.fit(X, y)
n_iter_second_fit = gb.n_iter_
assert 0 < n_iter_second_fit - n_iter_first_fit < n_iter_no_change
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
def test_warm_start_equal_n_estimators(GradientBoosting, X, y):
# Test if warm start with equal n_estimators does nothing
gb_1 = GradientBoosting(max_depth=2, early_stopping=False)
gb_1.fit(X, y)
gb_2 = clone(gb_1)
gb_2.set_params(max_iter=gb_1.max_iter, warm_start=True,
n_iter_no_change=5)
gb_2.fit(X, y)
# Check that both predictors are equal
_assert_predictor_equal(gb_1, gb_2, X)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
def test_warm_start_clear(GradientBoosting, X, y):
# Test if fit clears state.
gb_1 = GradientBoosting(n_iter_no_change=5, random_state=42)
gb_1.fit(X, y)
gb_2 = GradientBoosting(n_iter_no_change=5, random_state=42,
warm_start=True)
gb_2.fit(X, y) # inits state
gb_2.set_params(warm_start=False)
gb_2.fit(X, y) # clears old state and equals est
# Check that both predictors have the same train_score_ and
# validation_score_ attributes
assert_allclose(gb_1.train_score_, gb_2.train_score_)
assert_allclose(gb_1.validation_score_, gb_2.validation_score_)
# Check that both predictors are equal
_assert_predictor_equal(gb_1, gb_2, X)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize('rng_type', ('none', 'int', 'instance'))
def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type):
# Make sure the seeds for train/val split and small trainset subsampling
# are correctly set in a warm start context.
def _get_rng(rng_type):
# Helper to avoid consuming rngs
if rng_type == 'none':
return None
elif rng_type == 'int':
return 42
else:
return np.random.RandomState(0)
random_state = _get_rng(rng_type)
gb_1 = GradientBoosting(early_stopping=True, max_iter=2,
random_state=random_state)
gb_1.set_params(scoring=check_scoring(gb_1))
gb_1.fit(X, y)
random_seed_1_1 = gb_1._random_seed
gb_1.fit(X, y)
random_seed_1_2 = gb_1._random_seed # clear the old state, different seed
random_state = _get_rng(rng_type)
gb_2 = GradientBoosting(early_stopping=True, max_iter=2,
random_state=random_state, warm_start=True)
gb_2.set_params(scoring=check_scoring(gb_2))
gb_2.fit(X, y) # inits state
random_seed_2_1 = gb_2._random_seed
gb_2.fit(X, y) # clears old state and equals est
random_seed_2_2 = gb_2._random_seed
# Without warm starting, the seeds should be
# * all different if random state is None
# * all equal if random state is an integer
# * different when refitting and equal with a new estimator (because
# the random state is mutated)
if rng_type == 'none':
assert random_seed_1_1 != random_seed_1_2 != random_seed_2_1
elif rng_type == 'int':
assert random_seed_1_1 == random_seed_1_2 == random_seed_2_1
else:
assert random_seed_1_1 == random_seed_2_1 != random_seed_1_2
# With warm starting, the seeds must be equal
assert random_seed_2_1 == random_seed_2_2
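

# Illustrative sketch (not part of the test suite): the warm-start workflow the
# tests above exercise -- fit, raise max_iter, and refit so previously built
# trees are kept and only the extra iterations are added.
def _warm_start_usage_sketch():
    gb = HistGradientBoostingRegressor(max_iter=50, warm_start=True,
                                       early_stopping=False, random_state=0)
    gb.fit(X_regression, y_regression)  # builds the first 50 iterations
    gb.set_params(max_iter=75)
    gb.fit(X_regression, y_regression)  # adds 25 more, reusing the first 50
    return gb.n_iter_  # 75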
|
|
import os
import time
from homely._engine2 import Cleaner, Engine, Helper, getengine
from homely._errors import HelperError
from homely._ui import allowinteractive, allowpull, note
from homely._utils import haveexecutable, isnecessarypath
from homely.system import execute
def installpkg(name, wantcmd=None, **methods):
for key in methods:
assert key in _METHODS
# FIXME: make sure the user specifies at least one way to install the thing
getengine().run(InstallPackage(name, methods, wantcmd))
_ALLOW_INSTALL = True
def setallowinstall(allow_install):
"""
    Configure whether installpkg() and InstallPackage() are actually allowed to
install anything.
If installing isn't allowed, installpkg() and InstallPackage() will raise
    an error instead of installing the package. This is useful in a work
environment where your local sysadmin wants additional packages managed
externally by a tool like salt.
NOTE: This also controls whether InstallFromSource() is allowed to perform
commands starting with "sudo" - the assumption here is that if
InstallFromSource() can't run commands as root, it can't install anything.
Compiling from source and symlinking to ~/bin will still work fine.
"""
global _ALLOW_INSTALL
_ALLOW_INSTALL = bool(allow_install)
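

# Illustrative usage sketch (not part of this module): in a HOMELY.py script one
# might disable installs on a locked-down host, e.g.
#
#     setallowinstall(False)
#     installpkg('ag', apt='silversearcher-ag')
#
# With installs disabled, InstallPackage.makechanges() raises HelperError instead
# of invoking a package manager, and InstallFromSource refuses "sudo" commands.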
class InstallFromSource(Helper):
_title = None
_source_repo = None
_clone_to = None
_real_clone_to = None
_branch = None
_tag = None
_compile = None
# FIXME: we need a better way to specify whether or not a TTY is needed
_needs_tty = False
def __init__(self, source_repo, clone_to):
super(InstallFromSource, self).__init__()
self._title = 'Install %s into %s' % (source_repo, clone_to)
self._source_repo = source_repo
self._clone_to = clone_to
self._real_clone_to = os.path.expanduser(clone_to)
self._symlinks = []
def select_branch(self, branch_name, expiry=None):
# possible values of expiry:
# 0: always pull and compile
# -1: never pull or compile again
# <int>: pull and compile again after <int> seconds
if expiry is None:
expiry = 60 * 60 * 24 * 14
assert self._tag is None
assert type(expiry) is int and expiry >= -1
self._branch = branch_name
self._expiry = expiry
self._branchfact = '{}:compile-branch:{}:{}'.format(
self.__class__.__name__,
self._real_clone_to,
branch_name)
def select_tag(self, tag_name):
assert self._branch is None
self._tag = tag_name
self._expiry = None
def symlink(self, target, linkname):
self._symlinks.append((os.path.join(self._real_clone_to, target),
os.path.expanduser(linkname)))
def compile_cmd(self, commands):
assert self._compile is None
self._compile = list(commands)
for cmd in self._compile:
if cmd[0] == "sudo":
self._needs_tty = True
@property
def description(self):
return self._title
def getcleaner(self):
return
def affectspath(self, path):
return isnecessarypath(self._real_clone_to, path)
def pathsownable(self):
ret = {self._real_clone_to: Engine.TYPE_FOLDER_ONLY}
for target, linkname in self._symlinks:
ret[linkname] = Engine.TYPE_LINK
return ret
def getclaims(self):
return []
def isdone(self):
if not os.path.exists(self._real_clone_to):
return False
if self._tag:
# has the correct branch or tag been checked out?
current = execute(['git', 'tag', '--points-at', 'HEAD'],
cwd=self._real_clone_to,
stdout=True)[1]
if self._tag not in map(str, current.splitlines()):
return False
# if there's no symlinks, we can't tell if it's done or not
# TODO: test this before releasing
if not len(self._symlinks):
return False
# do the symlinks exist?
for target, linkname in self._symlinks:
if not os.path.islink(linkname):
return False
if os.readlink(linkname) != target:
return False
# it appears to be done ... yay
return True
def makechanges(self):
assert self._source_repo is not None
assert self._clone_to is not None
if not os.path.exists(self._real_clone_to):
note("Cloning %s" % self._source_repo)
pull_needed = False
execute(['git', 'clone', self._source_repo, self._real_clone_to])
else:
pull_needed = True
if not os.path.exists(os.path.join(self._real_clone_to, '.git')):
raise HelperError("%s is not a git repo" % self._real_clone_to)
# do we want a particular branch?
if self._branch:
execute(['git', 'checkout', self._branch], cwd=self._real_clone_to)
if pull_needed and allowpull():
note("Updating %s from %s" %
(self._clone_to, self._source_repo))
execute(['git', 'pull'], cwd=self._real_clone_to)
# check the branch fact to see if we need to compile again
factname = self._branchfact
else:
assert self._tag is not None
if pull_needed and allowpull():
note("Updating %s from %s" %
(self._clone_to, self._source_repo))
# NOTE: we use --force for projects like neovim that have a
# rolling 'nightly' tag
execute(['git', 'fetch', '--tags', '--force'],
cwd=self._real_clone_to)
execute(['git', 'checkout', self._tag], cwd=self._real_clone_to)
# if we used a tag name, create a 'fact' to prevent us re-compiling
# each time we run
factname = '{}:compile-tag:{}:{}'.format(
self.__class__.__name__,
self._real_clone_to,
self._tag)
docompile = False
if self._compile:
last_compile, prev_cmds = self._getfact(factname, (0, None))
what = ("Branch {}".format(self._branch) if self._branch
else "Tag {}".format(self._tag))
if last_compile == 0:
note("{} has never been compiled".format(what))
docompile = True
elif (self._expiry is not None
and ((last_compile + self._expiry) < time.time())):
note("{} is due to be compiled again".format(what))
docompile = True
elif prev_cmds != self._compile:
note("{} needs to be compiled again with new commands"
.format(what))
docompile = True
# run any compilation commands
if docompile:
# FIXME: we probably need to delete all the symlink targets before
# compiling, as this is our best way of determining that the
# compilation has failed ...
stdout = "TTY" if self._needs_tty else None
for cmd in self._compile:
if cmd[0] == "sudo" and not _ALLOW_INSTALL:
raise HelperError(
"%s is not allowed to run commands as root"
", as per setallowinstall()")
execute(cmd, cwd=self._real_clone_to, stdout=stdout)
self._setfact(factname, (time.time(), self._compile))
# create new symlinks
for source, dest in self._symlinks:
with note("Ensure symlink exists: %s -> %s" % (source, dest)):
if os.path.islink(dest):
target = os.readlink(dest)
if os.path.realpath(target) != os.path.realpath(source):
raise HelperError("Symlink %s is not pointing at %s" %
(dest, source))
continue
if os.path.exists(dest):
raise HelperError("%s already exists" % dest)
os.symlink(source, dest)
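

# Illustrative sketch (not part of this module): typical use of InstallFromSource
# from a HOMELY.py script -- clone a repo, pin a tag, compile, and symlink the
# resulting binary. The URL, tag and paths below are placeholders.
def _install_from_source_sketch():
    tool = InstallFromSource('https://github.com/example/tool.git', '~/src/tool')
    tool.select_tag('v1.0.0')
    tool.compile_cmd([['make']])
    tool.symlink('bin/tool', '~/bin/tool')
    getengine().run(tool)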
_METHODS = ('brew', 'yum', 'apt', 'port', 'pacman')
_ASROOT = ('yum', 'port', 'apt', 'pacman')
_INSTALL = {
'apt': lambda name: ['apt-get', 'install', name, '--quiet',
'--assume-yes'],
'yum': lambda name: ['yum', 'install', name, '--assumeyes'],
'pacman': lambda name: ['pacman', '-S', '--quiet',
'--noconfirm', name],
}
_UNINSTALL = {
'apt': lambda name: ['apt-get', 'remove', name, '--quiet', '--assume-yes'],
'yum': lambda name: ['yum', 'erase', name, '--assumeyes'],
'pacman': lambda name: ['pacman', '-R', '--noconfirm', name],
}
class InstallPackage(Helper):
def __init__(self, name, methods, wantcmd):
super(InstallPackage, self).__init__()
self._name = name
self._methods = methods
self._wantcmd = name if wantcmd is None else wantcmd
def getcleaner(self):
return PackageCleaner(self._name, self._methods)
def pathsownable(self):
return {}
def isdone(self):
return haveexecutable(self._wantcmd)
@property
def description(self):
how = [m for m in _METHODS if self._methods.get(m, True)]
return "Install package %s using %s" % (self._name, how)
def getclaims(self):
yield "package:%s" % self._name
def affectspath(self, path):
return False
def makechanges(self):
# try each method
for method in _METHODS:
localname = self._methods.get(method, self._name)
if localname is False:
continue
def getdefaultcmd(name):
return [method, 'install', name]
cmd = _INSTALL.get(method, getdefaultcmd)(localname)
# see if the required executable is installed
if not haveexecutable(cmd[0]):
continue
if not _ALLOW_INSTALL:
raise HelperError(
"InstallPackage() is not allowed to install packages"
", as per setallowinstall()")
if method in _ASROOT:
if not allowinteractive():
raise HelperError("Need to be able to escalate to root")
cmd.insert(0, 'sudo')
execute(cmd)
# record the fact that we installed this thing ourselves
factname = 'InstalledPackage:%s:%s' % (method, localname)
self._setfact(factname, True)
return
raise HelperError("No way to install %s" % self._name)
class PackageCleaner(Cleaner):
def __init__(self, name, methods):
super(PackageCleaner, self).__init__()
self._name = name
self._methods = methods
def asdict(self):
return dict(name=self._name, methods=self._methods)
@classmethod
def fromdict(class_, data):
return class_(data["name"], data["methods"])
def __eq__(self, other):
return self._name == other._name and self._methods == other._methods
@property
def description(self):
return "Remove package %s" % self._name
def needsclaims(self):
yield "package:%s" % self._name
def isneeded(self):
# look for any of the facts saying we installed these things
for method in _METHODS:
localname = self._methods.get(method, self._name)
factname = 'InstalledPackage:%s:%s' % (method, localname)
if self._getfact(factname, False):
return True
return False
def makechanges(self):
# look for any of the facts saying we installed these things
for method in _METHODS:
localname = self._methods.get(method, self._name)
factname = 'InstalledPackage:%s:%s' % (method, localname)
if not self._getfact(factname, False):
continue
def defaultuninstall(name):
return [method, 'uninstall', name]
cmd = _UNINSTALL.get(method, defaultuninstall)(localname)
if method in _ASROOT:
if not allowinteractive():
raise HelperError("Need to be able to escalate to root")
cmd.insert(0, 'sudo')
try:
execute(cmd)
finally:
# always clear the fact
self._clearfact(factname)
raise HelperError("Didn't remove package %s" % self._name)
def wantspath(self, path):
return False
|
|
__author__ = 'himanshu'
import hashlib
import datetime
import os
from sqlalchemy import create_engine, ForeignKey, Enum
from sqlalchemy.orm import sessionmaker, relationship, backref, scoped_session, validates
from sqlalchemy import Column, Integer, Boolean, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
fullname = Column(String)
nodes = relationship(
"Node",
backref=backref('user'),
cascade="all, delete-orphan"
)
files = relationship(
"File",
backref=backref('user'),
cascade="all, delete-orphan"
)
@hybrid_property
def top_level_nodes(self):
top_nodes = []
for node in self.nodes:
if node.top_level:
top_nodes.append(node)
return top_nodes
def as_dict(self):
return {
"id": str(self.id),
"type": "users",
"attributes": {
"full_name": self.fullname,
"given_name": "",
"middle_names": "",
"family_name": "",
"suffix": "",
"date_registered": "2015-09-11T18:19:01.860000",
"profile_image_url": "https://secure.gravatar.com/avatar/2b40121791d6946b6cdd805dc2ea4b7c?d=identicon"
},
"relationships": {
"nodes": {
"links": {
"related": "http://localhost:5000/v2/users/{}/nodes/".format(self.id)
}
}
},
"links": {
"self": "http://localhost:5000/v2/users/{}/".format(self.id),
"html": "https://staging2.osf.io/m5e83/"
}
}
def __repr__(self):
return "<User(fullname={})>".format(
self.fullname)
class Node(Base):
__tablename__ = "node"
PROJECT = 'project'
COMPONENT = 'component'
id = Column(Integer, primary_key=True)
title = Column(String)
category = Column(Enum(PROJECT, COMPONENT), default=COMPONENT)
date_modified = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
parent_id = Column(Integer, ForeignKey('node.id'))
child_nodes = relationship(
"Node",
backref=backref('parent', remote_side=[id]),
cascade="all, delete-orphan"
)
files = relationship(
"File",
backref=backref('node'),
cascade="all, delete-orphan"
)
@hybrid_property
def top_level(self):
return self.parent is None
@hybrid_property
def providers(self):
file_folders = []
for file_folder in self.files:
if file_folder.parent is None and file_folder.is_folder:
file_folders.append(file_folder)
return file_folders
def as_dict(self):
return {
"id": str(self.id),
"type": "nodes",
"attributes": {
"title": self.title,
"description": None,
"category": self.category,
"date_created": "2015-07-24T14:52:22.359000",
"date_modified": "2015-08-26T15:44:49.395000",
"tags": [],
"registration": True, # todo
"collection": False, # todo
"dashboard": False, # todo
"public": True # todo
},
"relationships": {
"children": {
"links": {
"related": {
"href": "http://localhost:5000/v2/nodes/{}/children/".format(self.id),
"meta": {
"count": len(self.child_nodes)
}
}
}
},
"contributors": {
"links": {
"related": {
"href": "https://staging2-api.osf.io/v2/nodes/243u7/contributors/",
"meta": {
"count": 1
}
}
}
},
"files": {
"links": {
"related": "http://localhost:5000/v2/nodes/{}/files/".format(self.id)
}
},
"node_links": {
"links": {
"related": {
"href": "https://staging2-api.osf.io/v2/nodes/243u7/node_links/",
"meta": {
"count": 0
}
}
}
},
"parent": {
"links": {
"related":{
'href': None if self.top_level else 'http://localhost:5000/v2/nodes/{}/'.format(self.parent_id),
'meta':{}
}
}
},
"registrations": {
"links": {
"related": {
"href": "http://localhost:5000/v2/nodes/{}/registrations/".format(self.id),
"meta": {
"count": 0
}
}
}
}
},
"links": {
"self": "http://localhost:5000/v2/nodes/{}/".format(self.id),
"html": "https://staging2.osf.io/243u7/"
}
}
def __repr__(self):
return "<Node ({}), category={}, title={}, parent_id={}>".format(
self.id, self.category, self.title, self.parent_id
)
class File(Base):
__tablename__ = "file"
FOLDER = 'folder'
FILE = 'file'
DEFAULT_PROVIDER = 'osfstorage'
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(Enum(FOLDER, FILE), nullable=False)
date_modified = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
provider = Column(String, default=DEFAULT_PROVIDER)
checked_out = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
node_id = Column(Integer, ForeignKey('node.id'), nullable=False)
parent_id = Column(Integer, ForeignKey('file.id'))
contents = Column(String)
files = relationship(
"File",
backref=backref('parent', remote_side=[id]),
cascade="all, delete-orphan",
)
@hybrid_property
def is_file(self):
return self.type == File.FILE
@hybrid_property
def is_folder(self):
return self.type == File.FOLDER
@hybrid_property
def path(self):
if self.has_parent:
temp = '/{}'.format(self.id)
if self.is_folder:
temp += '/'
return temp
else:
return '/'
@hybrid_property
def has_parent(self):
return self.parent is not None
@hybrid_property
def is_provider(self):
return self.is_folder and not self.has_parent
@validates('parent_id')
def validate_parent_id(self, key, parent_id):
if self.parent:
assert self.parent.node == self.node
return parent_id
@validates('node_id')
def validate_node_id(self, key, node_id):
if self.parent:
assert self.parent.node == self.node
return node_id
@validates('files')
def validate_files(self, key, files):
if self.is_file:
assert self.files == []
return files
@validates('contents')
def validate_contents(self, key, contents):
if self.is_folder:
assert self.contents is None
return contents
def as_dict(self):
resp = {
"id": str(self.id),
"type": 'files',
"attributes": {
"name": str(self.name),
"kind": 'file' if self.is_file else 'folder',
"path": self.path,
"provider": "osfstorage",
"last_touched": None,
"size": len(self.contents) if self.is_file else None
},
"relationships": {
"checkout": {
"links": {
"related": None # todo: handle checkouts
}
},
"files": {
"links": {
"related": {
'href': "http://localhost:5000/v2/nodes/{node_id}/files/osfstorage{file_path}".format(node_id=self.node.id, file_path=self.path) if self.is_folder else None,
'meta':{}
}
}
},
"versions": {
"links": {
"related": None # todo: handle versions
}
}
},
"links": {
"info": "http://localhost:5000/v2/files/{}/".format(self.id),
"download": "http://localhost:5000/v1/resources/{}/providers/{}/{}/".format(self.node_id, self.provider, self.id) if self.is_file else None,
"delete": "http://localhost:5000/v1/resources/{}/providers/{}/{}/".format(self.node_id, self.provider, self.id),
"move": "http://localhost:5000/v1/resources/{}/providers/{}/{}/".format(self.node_id, self.provider, self.id),
"upload": "http://localhost:5000/v1/resources/{}/providers/{}/{}/".format(self.node_id, self.provider, self.id),
"new_folder": 'http://localhost:5000/v1/resources/{}/providers/{}/{}/?kind=folder'.format(self.node_id, self.provider, self.id) if self.is_folder else None
}
}
if not self.has_parent:
resp['attributes']['node'] = str(self.node_id)
return resp
def __repr__(self):
return "<File ({}), type={}, name={}, path={}, parent_id={}>".format(
            self.id, self.type, self.name, self.path, self.parent_id
)
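

# Illustrative sketch (not part of this module): wiring the models above to an
# in-memory SQLite database; create_engine and sessionmaker are already imported
# at the top of this module.
def _example_session():
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    user = User(fullname='Jane Doe')
    project = Node(title='My Project', category=Node.PROJECT, user=user)
    session.add_all([user, project])
    session.commit()
    return session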
|
|
"""
Copyright (c) 2016 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.plugin import PostBuildPlugin, ExitPlugin
from atomic_reactor.plugins.post_pulp_pull import PulpPullPlugin
from atomic_reactor.inner import TagConf, PushConf
from atomic_reactor.util import ImageName
from tests.constants import MOCK
if MOCK:
from tests.retry_mock import mock_get_retry_session
from flexmock import flexmock
import pytest
import requests
import json
DIGEST_V1 = 'sha256:7de72140ec27a911d3f88d60335f08d6530a4af136f7beab47797a196e840afd'
DIGEST_V2 = 'sha256:85a7e3fb684787b86e64808c5b91d926afda9d6b35a0642a72d7a746452e71c1'
class MockerTasker(object):
def __init__(self):
self.pulled_images = []
def pull_image(self, image, insecure):
self.pulled_images.append(image)
return image.to_str()
def inspect_image(self, image):
pass
class TestPostPulpPull(object):
TEST_UNIQUE_IMAGE = 'foo:unique-tag'
CRANE_URI = 'crane.example.com'
EXPECTED_IMAGE = ImageName.parse('%s/%s' % (CRANE_URI, TEST_UNIQUE_IMAGE))
EXPECTED_PULLSPEC = EXPECTED_IMAGE.to_str()
def workflow(self, push=True, sync=True, build_process_failed=False):
tag_conf = TagConf()
tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)
push_conf = PushConf()
if push:
push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI, server_side_sync=False)
if sync:
push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI, server_side_sync=True)
mock_get_retry_session()
builder = flexmock()
setattr(builder, 'image_id', 'sha256:(old)')
return flexmock(tag_conf=tag_conf,
push_conf=push_conf,
builder=builder,
build_process_failed=build_process_failed,
plugin_workspace={})
media_type_v1 = 'application/vnd.docker.distribution.manifest.v1+json'
media_type_v2 = 'application/vnd.docker.distribution.manifest.v2+json'
media_type_v2_list = 'application/vnd.docker.distribution.manifest.list.v2+json'
def get_response_config_json(media_type):
return {
'config': {
'digest': 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14',
'mediaType': 'application/octet-stream',
'size': 4132
},
'layers': [
{
'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b',
'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
'size': 71907148
},
{
'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f',
'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
'size': 3945724
}
],
'mediaType': media_type,
'schemaVersion': 2
}
broken_response = {
'schemaVersion': 'foo',
'not-mediaType': 'bar'
}
config_response_config_v1 = requests.Response()
(flexmock(config_response_config_v1,
raise_for_status=lambda: None,
status_code=requests.codes.ok,
json=get_response_config_json(media_type_v1),
headers={
'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
'Docker-Content-Digest': DIGEST_V1
}))
config_response_config_v2 = requests.Response()
(flexmock(config_response_config_v2,
raise_for_status=lambda: None,
status_code=requests.codes.ok,
json=get_response_config_json(media_type_v2),
headers={
'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
'Docker-Content-Digest': DIGEST_V2
}))
config_response_config_v2_no_headers = requests.Response()
(flexmock(config_response_config_v2_no_headers,
raise_for_status=lambda: None,
status_code=requests.codes.ok,
_content=json.dumps(get_response_config_json(media_type_v2)).encode('utf-8'),
headers={}))
config_response_config_v2_broken = requests.Response()
(flexmock(config_response_config_v2_broken,
raise_for_status=lambda: None,
status_code=requests.codes.ok,
_content=json.dumps(broken_response).encode('utf-8'),
headers={}))
config_response_config_v2_list = requests.Response()
(flexmock(config_response_config_v2_list,
raise_for_status=lambda: None,
status_code=requests.codes.ok,
json=get_response_config_json(media_type_v2_list),
headers={
'Content-Type': 'application/vnd.docker.distribution.manifest.list.v2+json',
}))
def custom_get_v1(self, url, headers, **kwargs):
return self.config_response_config_v1
def custom_get_v2(self, url, headers, **kwargs):
return self.config_response_config_v2
def custom_get_v2_list(self, url, headers, **kwargs):
return self.config_response_config_v2_list
def custom_get_v2_no_headers(self, url, headers, **kwargs):
return self.config_response_config_v2_no_headers
def custom_get_v2_broken(self, url, headers, **kwargs):
return self.config_response_config_v2_broken
@pytest.mark.parametrize(('no_headers, broken_response'), [
(True, True),
(True, False),
(False, False)
])
@pytest.mark.parametrize('insecure', [True, False])
@pytest.mark.parametrize(('schema_version', 'pulp_plugin', 'expected_version'), [
('v1', [], []),
('v1', [{'name': 'pulp_push'}], ['application/json']),
('v1', [{'name': 'pulp_sync'}],
['application/vnd.docker.distribution.manifest.v1+json']),
('v1', [{'name': 'pulp_sync'}, {'name': 'pulp_push'}],
['application/json',
'application/vnd.docker.distribution.manifest.v1+json']),
('v2', [],
['application/vnd.docker.distribution.manifest.v2+json']),
('v2', [{'name': 'pulp_push'}],
['application/json',
'application/vnd.docker.distribution.manifest.v2+json']),
('v2', [{'name': 'pulp_sync'}],
['application/vnd.docker.distribution.manifest.v1+json',
'application/vnd.docker.distribution.manifest.v2+json']),
('v2', [{'name': 'pulp_sync'}, {'name': 'pulp_push'}],
['application/json',
'application/vnd.docker.distribution.manifest.v1+json',
'application/vnd.docker.distribution.manifest.v2+json']),
('list.v2', [],
['application/vnd.docker.distribution.manifest.list.v2+json']),
('list.v2', [{'name': 'pulp_push'}],
['application/json',
'application/vnd.docker.distribution.manifest.list.v2+json']),
('list.v2', [{'name': 'pulp_sync'}],
['application/vnd.docker.distribution.manifest.list.v2+json',
'application/vnd.docker.distribution.manifest.v1+json']),
('list.v2', [{'name': 'pulp_sync'}, {'name': 'pulp_push'}],
['application/json',
'application/vnd.docker.distribution.manifest.list.v2+json',
'application/vnd.docker.distribution.manifest.v1+json']),
])
def test_pull_first_time(self, no_headers, broken_response, insecure, schema_version,
pulp_plugin, expected_version):
workflow = self.workflow()
tasker = MockerTasker()
test_id = 'sha256:(new)'
if schema_version == 'v2':
# for v2, we just return pre-existing ID
test_id = 'sha256:(old)'
if schema_version == 'v1':
getter = self.custom_get_v1
elif schema_version == 'list.v2':
getter = self.custom_get_v2_list
elif no_headers:
if broken_response:
getter = self.custom_get_v2_broken
else:
getter = self.custom_get_v2_no_headers
else:
getter = self.custom_get_v2
(flexmock(requests.Session)
.should_receive('get')
.replace_with(getter))
if schema_version in ['v1', 'list.v2'] or broken_response:
(flexmock(tasker)
.should_call('pull_image')
.with_args(self.EXPECTED_IMAGE, insecure=insecure)
.and_return(self.EXPECTED_PULLSPEC)
.once()
.ordered())
(flexmock(tasker)
.should_receive('inspect_image')
.with_args(self.EXPECTED_PULLSPEC)
.and_return({'Id': test_id})
.once())
else:
(flexmock(tasker)
.should_call('pull_image')
.never())
(flexmock(tasker)
.should_call('inspect_image')
.never())
# Convert pulp_plugin into a JSON string and back into an object
# to make really sure we get a different string object back.
workflow.postbuild_plugins_conf = json.loads(json.dumps(pulp_plugin))
# Set the timeout parameters so that we retry exactly once, but quickly.
# With the get_manifest_digests() API, the 'broken_response' case isn't
        # distinguishable from no manifest yet, so we retry until timeout and then
# fall through to pulp_pull.
plugin = PulpPullPlugin(tasker, workflow, insecure=insecure,
timeout=0.1, retry_delay=0.25)
version = plugin.run()
if not broken_response:
assert version == expected_version
if schema_version == 'v1':
assert len(tasker.pulled_images) == 1
pulled = tasker.pulled_images[0].to_str()
assert pulled == self.EXPECTED_PULLSPEC
# Image ID is updated in workflow
assert workflow.builder.image_id == test_id
@pytest.mark.parametrize(('push', 'sync'), [
(True, False),
(False, True),
(True, True)
])
def test_pull_push_vs_sync(self, push, sync):
workflow = self.workflow(push=push, sync=sync)
tasker = MockerTasker()
test_id = 'sha256:(new)'
getter = self.custom_get_v1
if sync:
(flexmock(requests.Session)
.should_receive('get')
.replace_with(getter))
else:
(flexmock(requests.Session)
.should_receive('get')
.never())
(flexmock(tasker)
.should_call('pull_image')
.with_args(self.EXPECTED_IMAGE, insecure=False)
.and_return(self.EXPECTED_PULLSPEC)
.ordered())
(flexmock(tasker)
.should_receive('inspect_image')
.with_args(self.EXPECTED_PULLSPEC)
.and_return({'Id': test_id}))
workflow.postbuild_plugins_conf = []
plugin = PulpPullPlugin(tasker, workflow)
plugin.run()
assert workflow.builder.image_id == test_id
assert len(tasker.pulled_images) == 1
@pytest.mark.parametrize('v2,expect_v2schema2', [
(False, False),
(False, True),
(True, False),
(True, True),
])
@pytest.mark.parametrize('timeout,retry_delay,failures,expect_success', [
(0.1, 0.06, 1, True),
(0.1, 0.06, 1, True),
(0.1, 0.06, 3, False),
])
def test_pull_retry(self, expect_v2schema2, v2, timeout, retry_delay, failures,
expect_success):
workflow = self.workflow()
tasker = MockerTasker()
if v2:
test_id = 'sha256:(old)'
else:
# Image ID is updated in workflow
test_id = 'sha256:(new)'
not_found = requests.Response()
flexmock(not_found, status_code=requests.codes.not_found)
expectation = flexmock(requests.Session).should_receive('get')
# If pulp is returning a 404 for a manifest URL, we will get 5 requests
# (for v1, v2, list.v2, oci, and oci.index media types) before get_manifest_digests
# gives up, so we need to return 5 404's to equal one "failure".
for _ in range(5 * failures):
expectation = expectation.and_return(not_found)
expectation.and_return(self.config_response_config_v1)
if v2:
expectation.and_return(self.config_response_config_v2)
else:
expectation.and_return(self.config_response_config_v1)
expectation.and_return(self.config_response_config_v2_list)
# No OCI support in Pulp at the moment, will return a v1 response
expectation.and_return(self.config_response_config_v1)
# A special case for retries - schema 2 manifest digest is expected,
        # but it is never sent - the test should fail on timeout
if not v2 and expect_v2schema2:
expect_success = False
expectation = flexmock(tasker).should_call('pull_image')
if v2:
expectation.never()
elif expect_success:
expectation.and_return(self.EXPECTED_PULLSPEC).once()
expectation = flexmock(tasker).should_receive('inspect_image')
if v2:
expectation.never()
elif expect_success:
(expectation
.with_args(self.EXPECTED_PULLSPEC)
.and_return({'Id': test_id})
.once())
workflow.postbuild_plugins_conf = []
plugin = PulpPullPlugin(tasker, workflow, timeout=timeout,
retry_delay=retry_delay,
expect_v2schema2=expect_v2schema2)
if not expect_success:
with pytest.raises(Exception):
plugin.run()
return
plugin.run()
assert len(tasker.pulled_images) == 0 if v2 else 1
if not v2:
img = tasker.pulled_images[0].to_str()
assert img == self.EXPECTED_PULLSPEC
assert workflow.builder.image_id == test_id
def test_plugin_type(self):
# arrangement versions < 4
assert issubclass(PulpPullPlugin, PostBuildPlugin)
# arrangement version >= 4
assert issubclass(PulpPullPlugin, ExitPlugin)
# Verify the plugin does nothing when running as an exit
# plugin for an already-failed build
workflow = self.workflow(build_process_failed=True)
tasker = MockerTasker()
workflow.postbuild_plugins_conf = []
flexmock(requests.Session).should_receive('get').never()
flexmock(tasker).should_receive('pull_image').never()
flexmock(tasker).should_receive('inspect_image').never()
plugin = PulpPullPlugin(tasker, workflow)
media_types = plugin.run()
assert len(media_types) == 0
def test_unexpected_response(self):
workflow = self.workflow()
tasker = MockerTasker()
unauthorized = requests.Response()
flexmock(unauthorized, status_code=requests.codes.unauthorized)
flexmock(requests.Session).should_receive('get').and_return(unauthorized)
workflow.postbuild_plugins_conf = []
plugin = PulpPullPlugin(tasker, workflow)
with pytest.raises(requests.exceptions.HTTPError):
plugin.run()
def test_forbidden_response(self):
workflow = self.workflow()
tasker = MockerTasker()
forbidden = requests.Response()
flexmock(forbidden,
status_code=requests.codes.forbidden,
request=requests.Request(url='https://crane.example.com'))
expectation = flexmock(requests.Session).should_receive('get')
expectation.and_return(forbidden)
expectation.and_return(self.config_response_config_v1)
expectation.and_return(self.config_response_config_v2)
expectation.and_return(self.config_response_config_v2_list)
# No OCI support in Pulp at the moment, will return a v1 response
expectation.and_return(self.config_response_config_v1)
expectation.and_return(self.config_response_config_v1)
workflow.postbuild_plugins_conf = []
plugin = PulpPullPlugin(tasker, workflow, timeout=0.1,
retry_delay=0.06,
expect_v2schema2=True)
plugin.run()
|
|
#!/usr/bin/python3
"""
History:
--------
    10/2/2016 - G.Guillou - Creation
"""
import getopt
import sys
import os
import binascii
import threading
import random
import time
import boto3
from boto3.s3.transfer import S3Transfer
def usage():
"""
usage: displays the module help. Type createami.py -h for more details.
"""
print("""
CREATEAMI() CREATEAMI()
NAME
createami.py - Export an OVA file to AWS and turn it into an AMI
DESCRIPTION
This script creates an AWS AMI from an OVA file
SYNOPSIS
createami.py [-h] [-v] [-f {file}] [-b {bucket}] -k [-p {profile}]
OPTIONS
-h, --help
        displays createami.py usage and help
-v, --verbose
runs in verbose mode
-f {file}, --file={file}
points to the OVA file to load into AWS
-b {bucket}, --bucket={bucket}
specifies the bucket to use as a staging to load the OVA into AWS
-k, --keep
        leave the OVA in the bucket; otherwise it is deleted once exported into an AMI
-p {profile}, --profile={profile}
defines the AWS CLI profile to use to connect to AWS. If none, the
        program uses the default profile. As a result, ~/.aws/credentials
must exist and be properly configured
PRE-REQUISITES
        A number of pre-requisites must be met, including that the AWS Python
        SDK (boto3) is installed and configured
HISTORY
2016 -- written by Gregory P. Guillou (gregory.guillou@resetlogs.com)
CREATEAMI() CREATEAMI()
""")
class ProgressPercentage(object):
"""
    displays the percentage of an S3 file upload; see S3Transfer for more
    information.
"""
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
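

# Illustrative sketch (not part of this script): ProgressPercentage is meant to
# be passed as the `callback` of S3Transfer.upload_file(), exactly as main()
# does below; boto3 then calls it with the number of bytes sent so far.
#
#     transfer = S3Transfer(boto3.client('s3'))
#     transfer.upload_file('vm.ova', 'my-bucket', 'some/key',
#                          callback=ProgressPercentage('vm.ova'))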
class ImportOVA:
"""
provides methods to import an .ova file into an AMI.
"""
def __init__(self, profile):
self.key = ''.join([chr(random.randint(97, 122)) for i in range(0, 10)])
        self.profile = profile
def createrole(self, bucket):
"""
createrole: creates a role for AWS VM Import/Export to create an AMI
"""
boto3.setup_default_session(profile_name=self.profile)
client = boto3.client('iam')
rolename = "VMImport4Key"+self.key
policydocument = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "vmie.amazonaws.com" },
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals":{
"sts:ExternalId": "vmimport"
}
}
}]}"""
client.create_role(
RoleName=rolename,
AssumeRolePolicyDocument=policydocument)
policydocument = """{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation"],
"Resource": [
"arn:aws:s3:::"""+bucket+""""]
},{
"Effect": "Allow",
"Action": [
"s3:GetObject"],
"Resource": [
"arn:aws:s3:::"""+bucket+"""/*"]
},{
"Effect": "Allow",
"Action":[
"ec2:ModifySnapshotAttribute",
"ec2:CopySnapshot",
"ec2:RegisterImage",
"ec2:Describe*"],
"Resource": "*"
}]}"""
client.put_role_policy(
RoleName=rolename,
PolicyName='VMImportS3andEC2Access'+self.key,
PolicyDocument=policydocument)
time.sleep(10)
return rolename
def deleterole(self):
"""
deleterole: deletes the previously created role
"""
boto3.setup_default_session(profile_name=self.profile)
client = boto3.client('iam')
rolename = "VMImport4Key"+self.key
response = client.list_role_policies(
RoleName=rolename)
for policy in response["PolicyNames"]:
client.delete_role_policy(
RoleName=rolename,
PolicyName=policy)
client.delete_role(RoleName=rolename)
def importvm(self, bucket, key):
"""
Imports the VM from the bucket/key
"""
rolename = None
try:
rolename = self.createrole(bucket)
boto3.setup_default_session(profile_name=self.profile)
ec2 = boto3.client('ec2')
response = ec2.import_image(
Description='Import OVA',
DiskContainers=[{
'Description': 'Oracle Linux',
'Format': 'ova',
'UserBucket': {
'S3Bucket': bucket,
'S3Key': key
},
}],
RoleName=rolename)
taskid = response["ImportTaskId"]
status = "new"
ami = "None"
plot = ""
while status != 'completed':
response = ec2.describe_import_image_tasks(ImportTaskIds=[taskid])
try:
ami = response["ImportImageTasks"][0]["ImageId"]
except KeyError:
ami = "undefined"
try:
progress = response["ImportImageTasks"][0]["Progress"]
except KeyError:
progress = "100"
try:
status = response["ImportImageTasks"][0]["StatusMessage"]
except KeyError:
status = 'completed'
sys.stdout.write(
"\r " +
" ")
sys.stdout.write("\rImporting %s (%s) (%s%%) - %s%s" % (
ami, taskid, progress, status, plot))
time.sleep(5)
plot = plot + "."
if plot == ".....":
plot = ""
sys.stdout.write("\n")
self.deleterole()
except Exception:
if rolename:
self.deleterole()
raise
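

# Illustrative sketch (not part of this script): using ImportOVA directly when
# the OVA has already been uploaded to S3. The profile, bucket and key below
# are placeholders.
def _import_existing_ova_sketch():
    importer = ImportOVA('default')
    importer.importvm('my-staging-bucket', 'path/to/vm.ova')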
def main():
"""
    main: examines the command line and performs the upload/VM import
"""
profile = "default"
try:
opts, args = getopt.getopt(sys.argv[1:], "hvf:b:p:k", [
"help", "verbose", "file=", "bucket=", "keep", "profile="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
filename = "dist/localhost/localhost.ova"
bucket = "resetlogs"
keep = False
for opt, arg in opts:
if opt == "-v":
pass
elif opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-f", "--file"):
filename = arg
elif opt in ("-b", "--bucket"):
bucket = arg
elif opt in ("-k", "--keep"):
keep = True
elif opt in ("-p", "--profile"):
profile = arg
else:
assert False
pid = os.getpid()
os.environ["PID"] = str(pid)
importova = ImportOVA(profile)
key = importova.key+'/vm.ova'
boto3.setup_default_session(profile_name=profile)
client = boto3.client('s3')
if os.path.exists(filename):
        key = binascii.b2a_hex(os.urandom(6)).decode('ascii')  # bytes -> str for the S3 key
transfer = S3Transfer(client)
transfer.upload_file(filename, bucket, key,
callback=ProgressPercentage(filename))
else:
print("Error: "+filename+" does not exist")
importova.importvm(bucket, key)
if not keep:
client.delete_object(
Bucket=bucket,
Key=key)
if __name__ == "__main__":
main()
|
|
"""Allows the creation of a sensor that filters state property."""
from collections import Counter, deque
from copy import copy
from datetime import timedelta
from functools import partial
import logging
from numbers import Number
import statistics
from typing import Optional
import voluptuous as vol
from homeassistant.components import history
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.sensor import (
DEVICE_CLASSES as SENSOR_DEVICE_CLASSES,
DOMAIN as SENSOR_DOMAIN,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
FILTER_NAME_RANGE = "range"
FILTER_NAME_LOWPASS = "lowpass"
FILTER_NAME_OUTLIER = "outlier"
FILTER_NAME_THROTTLE = "throttle"
FILTER_NAME_TIME_THROTTLE = "time_throttle"
FILTER_NAME_TIME_SMA = "time_simple_moving_average"
FILTERS = Registry()
CONF_FILTERS = "filters"
CONF_FILTER_NAME = "filter"
CONF_FILTER_WINDOW_SIZE = "window_size"
CONF_FILTER_PRECISION = "precision"
CONF_FILTER_RADIUS = "radius"
CONF_FILTER_TIME_CONSTANT = "time_constant"
CONF_FILTER_LOWER_BOUND = "lower_bound"
CONF_FILTER_UPPER_BOUND = "upper_bound"
CONF_TIME_SMA_TYPE = "type"
TIME_SMA_LAST = "last"
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = "mdi:chart-line-variant"
FILTER_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int)}
)
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(CONF_FILTER_RADIUS, default=DEFAULT_FILTER_RADIUS): vol.Coerce(
float
),
}
)
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(
CONF_FILTER_TIME_CONSTANT, default=DEFAULT_FILTER_TIME_CONSTANT
): vol.Coerce(int),
}
)
FILTER_RANGE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
}
)
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
vol.Optional(CONF_TIME_SMA_TYPE, default=TIME_SMA_LAST): vol.In(
[TIME_SMA_LAST]
),
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
}
)
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): vol.Any(
cv.entity_domain(SENSOR_DOMAIN), cv.entity_domain(BINARY_SENSOR_DOMAIN)
),
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_FILTERS): vol.All(
cv.ensure_list,
[
vol.Any(
FILTER_OUTLIER_SCHEMA,
FILTER_LOWPASS_SCHEMA,
FILTER_TIME_SMA_SCHEMA,
FILTER_THROTTLE_SCHEMA,
FILTER_TIME_THROTTLE_SCHEMA,
FILTER_RANGE_SCHEMA,
)
],
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template sensors."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
entity_id = config.get(CONF_ENTITY_ID)
filters = [
FILTERS[_filter.pop(CONF_FILTER_NAME)](entity=entity_id, **_filter)
for _filter in config[CONF_FILTERS]
]
async_add_entities([SensorFilter(name, entity_id, filters)])
class SensorFilter(Entity):
"""Representation of a Filter Sensor."""
def __init__(self, name, entity_id, filters):
"""Initialize the sensor."""
self._name = name
self._entity = entity_id
self._unit_of_measurement = None
self._state = None
self._filters = filters
self._icon = None
self._device_class = None
@callback
def _update_filter_sensor_state_event(self, event):
"""Handle device state changes."""
_LOGGER.debug("Update filter on event: %s", event)
self._update_filter_sensor_state(event.data.get("new_state"))
@callback
def _update_filter_sensor_state(self, new_state, update_ha=True):
"""Process device state changes."""
if new_state is None:
_LOGGER.warning(
"While updating filter %s, the new_state is None", self._name
)
self._state = None
self.async_write_ha_state()
return
if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
self._state = new_state.state
self.async_write_ha_state()
return
temp_state = new_state
try:
for filt in self._filters:
filtered_state = filt.filter_state(copy(temp_state))
_LOGGER.debug(
"%s(%s=%s) -> %s",
filt.name,
self._entity,
temp_state.state,
"skip" if filt.skip_processing else filtered_state.state,
)
if filt.skip_processing:
return
temp_state = filtered_state
except ValueError:
_LOGGER.error(
"Could not convert state: %s (%s) to number",
new_state.state,
type(new_state.state),
)
return
self._state = temp_state.state
if self._icon is None:
self._icon = new_state.attributes.get(ATTR_ICON, ICON)
if (
self._device_class is None
and new_state.attributes.get(ATTR_DEVICE_CLASS) in SENSOR_DEVICE_CLASSES
):
self._device_class = new_state.attributes.get(ATTR_DEVICE_CLASS)
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
if update_ha:
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
if "recorder" in self.hass.config.components:
history_list = []
largest_window_items = 0
largest_window_time = timedelta(0)
# Determine the largest window_size by type
for filt in self._filters:
if (
filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS
and largest_window_items < filt.window_size
):
largest_window_items = filt.window_size
elif (
filt.window_unit == WINDOW_SIZE_UNIT_TIME
and largest_window_time < filt.window_size
):
largest_window_time = filt.window_size
# Retrieve the largest window_size of each type
if largest_window_items > 0:
filter_history = await self.hass.async_add_executor_job(
partial(
history.get_last_state_changes,
self.hass,
largest_window_items,
entity_id=self._entity,
)
)
if self._entity in filter_history:
history_list.extend(filter_history[self._entity])
if largest_window_time > timedelta(seconds=0):
start = dt_util.utcnow() - largest_window_time
filter_history = await self.hass.async_add_executor_job(
partial(
history.state_changes_during_period,
self.hass,
start,
entity_id=self._entity,
)
)
if self._entity in filter_history:
history_list.extend(
[
state
for state in filter_history[self._entity]
if state not in history_list
]
)
# Sort the window states
history_list = sorted(history_list, key=lambda s: s.last_updated)
_LOGGER.debug(
"Loading from history: %s",
[(s.state, s.last_updated) for s in history_list],
)
# Replay history through the filter chain
for state in history_list:
if state.state not in [STATE_UNKNOWN, STATE_UNAVAILABLE, None]:
self._update_filter_sensor_state(state, False)
self.async_on_remove(
async_track_state_change_event(
self.hass, [self._entity], self._update_filter_sensor_state_event
)
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ENTITY_ID: self._entity}
@property
def device_class(self):
"""Return device class."""
return self._device_class
class FilterState:
"""State abstraction for filter usage."""
def __init__(self, state):
"""Initialize with HA State object."""
self.timestamp = state.last_updated
try:
self.state = float(state.state)
except ValueError:
self.state = state.state
def set_precision(self, precision):
"""Set precision of Number based states."""
if isinstance(self.state, Number):
value = round(float(self.state), precision)
self.state = int(value) if precision == 0 else value
def __str__(self):
"""Return state as the string representation of FilterState."""
return str(self.state)
def __repr__(self):
"""Return timestamp and state as the representation of FilterState."""
return f"{self.timestamp} : {self.state}"
class Filter:
"""Filter skeleton."""
def __init__(
self,
name,
window_size: int = 1,
precision: Optional[int] = None,
entity: Optional[str] = None,
):
"""Initialize common attributes.
:param window_size: size of the sliding window that holds previous values
:param precision: round filtered value to precision value
:param entity: used for debugging only
"""
if isinstance(window_size, int):
self.states = deque(maxlen=window_size)
self.window_unit = WINDOW_SIZE_UNIT_NUMBER_EVENTS
else:
self.states = deque(maxlen=0)
self.window_unit = WINDOW_SIZE_UNIT_TIME
self.precision = precision
self._name = name
self._entity = entity
self._skip_processing = False
self._window_size = window_size
self._store_raw = False
self._only_numbers = True
@property
def window_size(self):
"""Return window size."""
return self._window_size
@property
def name(self):
"""Return filter name."""
return self._name
@property
def skip_processing(self):
"""Return whether the current filter_state should be skipped."""
return self._skip_processing
def _filter_state(self, new_state):
"""Implement filter."""
raise NotImplementedError()
def filter_state(self, new_state):
"""Implement a common interface for filters."""
fstate = FilterState(new_state)
if self._only_numbers and not isinstance(fstate.state, Number):
raise ValueError(f"State <{fstate.state}> is not a Number")
filtered = self._filter_state(fstate)
filtered.set_precision(self.precision)
if self._store_raw:
self.states.append(copy(FilterState(new_state)))
else:
self.states.append(copy(filtered))
new_state.state = filtered.state
return new_state
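

# Illustrative sketch (not part of the integration): new filters follow the same
# pattern as the ones below -- subclass Filter, implement _filter_state(), and
# register the class under a configuration name ("passthrough" is made up here).
#
#     @FILTERS.register("passthrough")
#     class PassthroughFilter(Filter):
#         """Filter that returns every state unchanged."""
#
#         def _filter_state(self, new_state):
#             return new_state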
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter):
"""Range filter.
Determines if new state is in the range of upper_bound and lower_bound.
If not inside, lower or upper bound is returned instead.
"""
def __init__(
self,
entity,
precision: Optional[int] = DEFAULT_PRECISION,
lower_bound: Optional[float] = None,
upper_bound: Optional[float] = None,
):
"""Initialize Filter.
:param upper_bound: band upper bound
:param lower_bound: band lower bound
"""
super().__init__(FILTER_NAME_RANGE, precision=precision, entity=entity)
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._stats_internal = Counter()
def _filter_state(self, new_state):
"""Implement the range filter."""
if self._upper_bound is not None and new_state.state > self._upper_bound:
self._stats_internal["erasures_up"] += 1
_LOGGER.debug(
"Upper outlier nr. %s in %s: %s",
self._stats_internal["erasures_up"],
self._entity,
new_state,
)
new_state.state = self._upper_bound
elif self._lower_bound is not None and new_state.state < self._lower_bound:
self._stats_internal["erasures_low"] += 1
_LOGGER.debug(
"Lower outlier nr. %s in %s: %s",
self._stats_internal["erasures_low"],
self._entity,
new_state,
)
new_state.state = self._lower_bound
return new_state
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter):
"""BASIC outlier filter.
Determines if new state is in a band around the median.
"""
def __init__(self, window_size, precision, entity, radius: float):
"""Initialize Filter.
:param radius: band radius
"""
super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
self._radius = radius
self._stats_internal = Counter()
self._store_raw = True
def _filter_state(self, new_state):
"""Implement the outlier filter."""
median = statistics.median([s.state for s in self.states]) if self.states else 0
if (
len(self.states) == self.states.maxlen
and abs(new_state.state - median) > self._radius
):
self._stats_internal["erasures"] += 1
_LOGGER.debug(
"Outlier nr. %s in %s: %s",
self._stats_internal["erasures"],
self._entity,
new_state,
)
new_state.state = median
return new_state
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter):
"""BASIC Low Pass Filter."""
def __init__(self, window_size, precision, entity, time_constant: int):
"""Initialize Filter."""
super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
self._time_constant = time_constant
def _filter_state(self, new_state):
"""Implement the low pass filter."""
if not self.states:
return new_state
new_weight = 1.0 / self._time_constant
prev_weight = 1.0 - new_weight
new_state.state = (
prev_weight * self.states[-1].state + new_weight * new_state.state
)
return new_state
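

# Worked example (illustrative, not part of the integration): with the default
# time_constant of 10, each update moves one tenth of the way toward the raw
# reading. For a previous filtered state of 20.0 and a new reading of 30.0:
#   new_weight  = 1 / 10                  = 0.1
#   prev_weight = 1 - 0.1                 = 0.9
#   filtered    = 0.9 * 20.0 + 0.1 * 30.0 = 21.0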
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter):
"""Simple Moving Average (SMA) Filter.
The window_size is determined by time, and SMA is time weighted.
"""
def __init__(
self, window_size, precision, entity, type
): # pylint: disable=redefined-builtin
"""Initialize Filter.
:param type: type of algorithm used to connect discrete values
"""
super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
self._time_window = window_size
self.last_leak = None
self.queue = deque()
def _leak(self, left_boundary):
"""Remove timeouted elements."""
while self.queue:
if self.queue[0].timestamp + self._time_window <= left_boundary:
self.last_leak = self.queue.popleft()
else:
return
def _filter_state(self, new_state):
"""Implement the Simple Moving Average filter."""
self._leak(new_state.timestamp)
self.queue.append(copy(new_state))
moving_sum = 0
start = new_state.timestamp - self._time_window
prev_state = self.last_leak or self.queue[0]
for state in self.queue:
moving_sum += (state.timestamp - start).total_seconds() * prev_state.state
start = state.timestamp
prev_state = state
new_state.state = moving_sum / self._time_window.total_seconds()
return new_state
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter):
"""Throttle Filter.
One sample per window.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)
self._only_numbers = False
def _filter_state(self, new_state):
"""Implement the throttle filter."""
if not self.states or len(self.states) == self.states.maxlen:
self.states.clear()
self._skip_processing = False
else:
self._skip_processing = True
return new_state
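# Note on ThrottleFilter above (illustrative; assumes the base Filter appends
# each processed reading to self.states, as in the upstream component): with
# window_size=3 the 1st reading is emitted, the 2nd and 3rd are skipped while
# the window fills, the 4th clears the window and is emitted, and so on.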
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter):
"""Time Throttle Filter.
One sample per time period.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_TIME_THROTTLE, window_size, precision, entity)
self._time_window = window_size
self._last_emitted_at = None
self._only_numbers = False
def _filter_state(self, new_state):
"""Implement the filter."""
window_start = new_state.timestamp - self._time_window
if not self._last_emitted_at or self._last_emitted_at <= window_start:
self._last_emitted_at = new_state.timestamp
self._skip_processing = False
else:
self._skip_processing = True
return new_state
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""image generation with transformer (attention).
encoder: [Self-Attention, Feed-forward] x n
decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class Imagetransformer(t2t_model.T2TModel):
"""Conditional image generation with attention. See file docstring.
The model admits either a Categorical or discretized mixture of logistic
distributions (DMOL) as the likelihood. When using DMOL for training, double
check that the evaluation metrics also use it.
"""
def body(self, features):
hparams = copy.copy(self._hparams)
targets = features["targets"]
if (hparams.likelihood == cia.DistributionType.DMOL and
(hparams.modality["targets"] !=
modalities.ImageChannelBottomIdentityModality or
hparams.num_channels != 1)):
raise ValueError("When using DMOL for the likelihood,modality['targets'] "
"must be ImageChannelBottomIdentityModality and "
"num_channels must be 1.")
if (not tf.get_variable_scope().reuse and
hparams.mode != tf.contrib.learn.ModeKeys.INFER and
hparams.modality["targets"] !=
modalities.ImageChannelBottomIdentityModality):
tf.summary.image("targets", tf.to_float(targets), max_outputs=1)
# Extra losses list if we want to use moe.
losses = []
# Prepare decoder inputs and bias.
decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
# Add class label to decoder input.
if not hparams.unconditional:
inputs = features["inputs"]
decoder_input += tf.reshape(
inputs,
[common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
decoder_output = cia.transformer_decoder_layers(
decoder_input,
None,
hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.dec_attention_type,
losses=losses,
name="decoder")
output = cia.create_output(decoder_output, rows, cols, targets, hparams)
if losses:
return output, {"extra_loss": tf.add_n(losses)}
else:
return output
def loss(self, logits, features):
if self._hparams.likelihood == cia.DistributionType.DMOL:
return common_layers.dml_loss(logits, features["targets"])
return super(Imagetransformer, self).loss(logits, features)
def sample(self, features):
"""Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
logits, losses = self(features) # pylint: disable=not-callable
samples = common_layers.sample_from_discretized_mix_logistic(
logits, seed=None)
return samples, logits, losses
return super(Imagetransformer, self).sample(features)
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
samples: an integer `Tensor`.
logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
losses: a dictionary: {loss-name (string): floating point `Scalar`}
"""
if self._hparams.likelihood == cia.DistributionType.DMOL:
raise NotImplementedError("Decoding is not currently available for DMOL.")
return super(Imagetransformer, self)._slow_greedy_infer(features,
decode_length)
@registry.register_model
class ImagetransformerMoe(t2t_model.T2TModel):
"""Conditional image generation with attention and MoE."""
@staticmethod
def use_body_sharded():
return True
def body_sharded(self, sharded_features):
dp = self._data_parallelism
hparams = copy.copy(self._hparams)
inputs = sharded_features["inputs"]
targets = sharded_features["targets"]
# Determine attention type and padding from hparams.
q_padding, kv_padding = "VALID", "VALID"
if hparams.q_filter_width > 1:
q_padding = "LEFT"
if hparams.kv_filter_width > 1:
kv_padding = "LEFT"
# Prepare decoder inputs and bias.
decoder_input, rows, cols = dp(cia.prepare_decoder_inputs,
inputs, targets, hparams)
# Run decoder.
# TODO(nikip): Use q_padding and kv_padding
del q_padding, kv_padding
decoder_output, extra_loss = cia.transformer_layers_sharded(
dp,
self._ps_devices,
decoder_input,
hparams.num_hidden_layers,
hparams,
self_attention_bias=None,
enc_output=None,
attention_type=hparams.dec_attention_type,
name="decoder")
output = dp(cia.create_output, decoder_output, rows, cols, targets, hparams)
return output, extra_loss
@registry.register_hparams
def image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 4
hparams.max_length = 3075
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 0.2
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.modality["targets"] = modalities.IdentityModality
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.add_hparam("filter_size", 512) # Add new ones like this.
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("num_output_layers", 3)
hparams.add_hparam("block_size", 1)
# dilated attention based flags
hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
hparams.add_hparam("num_encoder_layers", 4)
hparams.add_hparam("num_decoder_layers", 12)
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
hparams.add_hparam("block_raster_scan", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("likelihood", cia.DistributionType.CAT)
hparams.add_hparam("unconditional", False) # unconditional generation
# parameters of discretized mixture of logistics loss from pixel cnn++
hparams.add_hparam("num_mixtures", 10)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 8
hparams.moe_loss_coef = 1e-3
# These parameters are for relative attention
hparams.add_hparam("shared_rel", False) # share relative embeddings
return hparams
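# Note (not part of the original file): each registered hparams set below is an
# ordinary function, so variants are built by calling a base config and
# overriding individual fields, e.g.
#   hp = image_transformer_base()
#   hp.num_decoder_layers = 14  # override a single field of the base config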
@registry.register_hparams
def imagetransformer_base():
hparams = image_transformer_base()
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base():
"""Best config for 2.90 bits/dim on CIFAR10 using cross entropy."""
hparams = image_transformer_base()
hparams.batch_size = 4
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_cifar10_base_dmol():
"""Best config for 2.90 bits/dim on CIFAR10 using DMOL."""
hparams = image_transformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.modality["targets"] = modalities.ImageChannelBottomIdentityModality
hparams.num_heads = 8
hparams.batch_size = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.num_decoder_layers = 12
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_imagenet_tpu():
"""Transformer base params for cifar-10."""
hparams = imagetransformer_base_tpu()
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_imagenet32_base():
"""Best config for ImageNet-32 with 3.77 bits/dim using cross entropy."""
hparams = imagetransformer_cifar10_base()
hparams.batch_size = 4
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_rel():
"""Base with relative attention."""
hparams = imagetransformer_base()
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformer_sep_channels():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 512
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_multipos3():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.q_filter_width = 3
hparams.kv_filter_width = 3
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64():
"""big 1d model for unconditional generation on imagenet."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.unconditional = True
hparams.max_length = 14000
hparams.batch_size = 1
hparams.img_len = 64
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.modality["targets"] = modalities.ImageChannelBottomIdentityModality
hparams.num_heads = 8
hparams.batch_size = 4
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.summarize_grads = True
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan()
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan():
hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a()
hparams.unconditional = True
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_a():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.01
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan()
hparams.learning_rate = 0.1
hparams.hidden_size = 256
hparams.attention_key_channels = 512
hparams.attention_value_channels = 512
hparams.filter_size = 1024
return hparams
@registry.register_hparams
def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b()
hparams.filter_size = 512
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate = 0.1
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.pos = "emb"
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l():
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
hparams.num_decoder_layers = 12
hparams.clip_grad_norm = 40.
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k()
hparams.batch_size = 8
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_relsh():
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel()
hparams.shared_rel = True
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_bs1():
"""For 128x128."""
# TODO(trandustin): why are these running? max_length and img_len not set
# 256x256 was also training without setting max_length
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p_bs1():
"""For 128x128."""
hparams = imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p()
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1():
"""For 256x256."""
hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g()
# TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in
# image transformer training implementation?
# hparams.img_len = 256
hparams.max_length = 66000 # allow for 256x256
hparams.batch_size = 1
hparams.num_decoder_layers = 5
hparams.hidden_size = 128
hparams.filter_size = 128
hparams.attention_key_channels = 64
hparams.attention_value_channels = 64
hparams.layer_prepostprocess_dropout = 0.0
return hparams
@registry.register_hparams
def imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1()
# Use Adafactor which uses less memory than Adam, and its recommendations.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
return hparams
@registry.register_hparams
def imagetransformerpp_base_6l_8h_dr00_dan_g_bs1_adafactor():
"""For 256x256."""
hparams = imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor()
hparams.num_decoder_layers = 6
return hparams
@registry.register_hparams
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_eval():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
# hparams.layer_prepostprocess_dropout = 0.2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_128():
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.block_width = 128
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_cond_dr03_dan():
"""Best conditional Cifar10 gen param."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan():
"""Best unconditional Cifar10 gen param."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
hparams.dec_attention_type = cia.AttentionType.DILATED
hparams.block_length = 128
hparams.block_width = 128
hparams.add_hparam("num_memory_blocks", 1)
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_b():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 64
hparams.num_memory_blocks = 2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_c():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 32
hparams.num_memory_blocks = 4
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.filter_size = 1024
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.hidden_size = 512
hparams.learning_rate_warmup_steps = 4000
hparams.sampling_method = "random"
hparams.beam_size = 1
hparams.block_width = 256
return hparams
@registry.register_hparams
def imagetransformer1d_base_8l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 8
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer1d_base_12l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_dr01():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big_uncond()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_sep_output_channels_8l_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.unconditional = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 4
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 8
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_tiny():
hparams = imagetransformer_base()
hparams.num_decoder_layers = 2
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.unconditional = True
hparams.max_length = 66000 # allow for 256x256
return hparams
@registry.register_hparams
def imagetransformerpp_tiny():
hparams = imagetransformer_tiny()
hparams.likelihood = cia.DistributionType.DMOL
hparams.num_channels = 1
hparams.modality["targets"] = modalities.ImageChannelBottomIdentityModality
return hparams
@registry.register_hparams
def imagetransformer_tiny_tpu():
hparams = imagetransformer_tiny()
update_hparams_for_tpu(hparams)
hparams.num_hidden_layers = 2
hparams.hidden_size = 16
hparams.batch_size = 2
hparams.num_heads = 2
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_moe_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_10l_16h_big_dr01_imgnet()
hparams.initializer = "orthogonal"
hparams.learning_rate_warmup_steps = 16000
hparams.add_hparam("moe_layers_decoder", "2,7") # Which layer is MoE.
hparams.moe_hidden_sizes = "4096" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 64 # Number of experts in each MoE layer.
hparams.moe_k = 4 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 3e-2 # MoE loss coefficient (1e-2 is usually ok).
hparams.scheduled_sampling_prob = 0.1
hparams.scheduled_sampling_warmup_steps = 200000
return hparams
@registry.register_hparams
def imagetransformer_moe_tiny():
"""Set of hyperparameters for a very small imagetransformer with MoE."""
hparams = imagetransformer_tiny()
hparams.hidden_size = 64
hparams.batch_size = 1
hparams.num_hidden_layers = 3
hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D
hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE.
hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated).
hparams.moe_num_experts = 16 # Number of experts in each MoE layer.
hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4).
hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok).
return hparams
def update_hparams_for_tpu(hparams):
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.batch_size = 4
@registry.register_hparams
def imagetransformer_sep_channels_8l_tpu():
"""Hparams for training imagetransformer on tpu."""
hparams = imagetransformer_sep_channels_8l()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.shared_embedding_and_softmax_weights = False
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_tpu():
"""Small model for tpu cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.learning_rate = 0.2
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
return hparams
@registry.register_hparams
def imagetransformer_b10l_dr03_moe_tpu():
"""Moe tpu params."""
hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.ffn_layer = "local_moe_tpu"
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr03_lr025_tpu():
"""TPU related small model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 10
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 8000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
# hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_tpu():
"""TPU 12 layer model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 1024
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_big_uncond_dr03_lr025_tpu():
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 5000
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b256_uncond_dr03_rel_tpu():
"""works very well on 4x4."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.shared_rel = True
hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D
return hparams
@registry.register_ranged_hparams
def imagetransformer_cifar_tpu_range(rhp):
"""Range of hyperparameters for vizier."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16])
rhp.set_discrete("hidden_size", [256, 512, 1024])
rhp.set_discrete("block_length", [128, 256, 512])
rhp.set_categorical("dec_attention_type", [
cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D])
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr03_tpu():
"""TPU related big model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 6000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():
"""TPU related imagenet model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 6000
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_uncond_dr03_tpu():
"""TPU related small model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
"""TPU config for cifar 10."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 256
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams
@registry.register_hparams
def imagetransformer_b12l_8h_b256_uncond_dr03_tpu():
"""TPU related 12 layer 8 heads model."""
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 8 # heads are expensive on tpu
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_b10l_4h_big_uncond_dr01_tpu():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 4
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
|
|
import datetime
import json
import os
import sys
import threading
from django import db
from django.db import models
from django.forms.models import model_to_dict
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.core.exceptions import PermissionDenied
import synchronizers.model_policy
from model_autodeletion import ephemeral_models
from cgi import escape as html_escape
try:
# This is a no-op if observer_disabled is set to 1 in the config file
from synchronizers.base import *
except:
print >> sys.stderr, "import of observer failed! printing traceback and disabling observer:"
import traceback
traceback.print_exc()
# guard against something failing
def notify_observer(*args, **kwargs):
pass
class StrippedCharField(models.CharField):
""" CharField that strips trailing and leading spaces."""
def clean(self, value, *args, **kwds):
if value is not None:
value = value.strip()
return super(StrippedCharField, self).clean(value, *args, **kwds)
# This manager will be inherited by all subclasses because
# the core model is abstract.
class PlCoreBaseDeletionManager(models.Manager):
def get_queryset(self):
parent=super(PlCoreBaseDeletionManager, self)
if hasattr(parent, "get_queryset"):
return parent.get_queryset().filter(deleted=True)
else:
return parent.get_query_set().filter(deleted=True)
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
# This manager will be inherited by all subclasses because
# the core model is abstract.
class PlCoreBaseManager(models.Manager):
def get_queryset(self):
parent=super(PlCoreBaseManager, self)
if hasattr(parent, "get_queryset"):
return parent.get_queryset().filter(deleted=False)
else:
return parent.get_query_set().filter(deleted=False)
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
class PlModelMixIn(object):
# Provides useful methods for computing which objects in a model have
# changed. Make sure to do self._initial = self._dict in the __init__
# method.
# Also includes useful utility, like getValidators
# This is broken out of PlCoreBase into a Mixin so the User model can
# also make use of it.
@property
def _dict(self):
return model_to_dict(self, fields=[field.name for field in
self._meta.fields])
def fields_differ(self,f1,f2):
if isinstance(f1,datetime.datetime) and isinstance(f2,datetime.datetime) and (timezone.is_aware(f1) != timezone.is_aware(f2)):
return True
else:
return (f1 != f2)
@property
def diff(self):
d1 = self._initial
d2 = self._dict
diffs = [(k, (v, d2[k])) for k, v in d1.items() if self.fields_differ(v,d2[k])]
return dict(diffs)
@property
def has_changed(self):
return bool(self.diff)
@property
def changed_fields(self):
return self.diff.keys()
def has_field_changed(self, field_name):
return field_name in self.diff.keys()
def get_field_diff(self, field_name):
return self.diff.get(field_name, None)
@classmethod
def getValidators(cls):
""" primarily for REST API, return a dictionary of field names mapped
to lists of the type of validations that need to be applied to
those fields.
"""
validators = {}
for field in cls._meta.fields:
l = []
if field.blank==False:
l.append("notBlank")
if field.__class__.__name__=="URLField":
l.append("url")
validators[field.name] = l
return validators
def get_backend_register(self, k, default=None):
try:
return json.loads(self.backend_register).get(k, default)
except AttributeError:
return default
def set_backend_register(self, k, v):
br = {}
try:
br=json.loads(self.backend_register)
except AttributeError:
br={}
br[k] = v
self.backend_register = json.dumps(br)
def get_backend_details(self):
try:
scratchpad = json.loads(self.backend_register)
except AttributeError:
return (None, None, None, None)
try:
exponent = scratchpad['exponent']
except KeyError:
exponent = None
try:
last_success_time = scratchpad['last_success']
dt = datetime.datetime.fromtimestamp(last_success_time)
last_success = dt.strftime("%Y-%m-%d %H:%M")
except KeyError:
last_success = None
try:
failures = scratchpad['failures']
except KeyError:
failures=None
try:
last_failure_time = scratchpad['last_failure']
dt = datetime.datetime.fromtimestamp(last_failure_time)
last_failure = dt.strftime("%Y-%m-%d %H:%M")
except KeyError:
last_failure = None
return (exponent, last_success, last_failure, failures)
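# Illustrative example (not from the codebase) of the backend_register
# scratchpad parsed above; the observer writes it via set_backend_register:
#   {"exponent": 2, "last_success": 1514764800.0, "failures": 3}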
def get_backend_icon(self):
is_perfect = (self.backend_status is not None) and self.backend_status.startswith("1 -")
is_good = (self.backend_status is not None) and (self.backend_status.startswith("0 -") or self.backend_status.startswith("1 -"))
is_provisioning = self.backend_status is None or self.backend_status == "Provisioning in progress" or self.backend_status==""
# returns (icon_name, tooltip)
if (self.enacted is not None) and (self.enacted >= self.updated and is_good) or is_perfect:
return ("success", "successfully enacted")
else:
if is_good or is_provisioning:
return ("clock", "Pending sync, last_status = " + html_escape(self.backend_status, quote=True))
else:
return ("error", html_escape(self.backend_status, quote=True))
def enforce_choices(self, field, choices):
choices = [x[0] for x in choices]
for choice in choices:
if field==choice:
return
if (choice==None) and (field==""):
# allow "" and None to be equivalent
return
raise Exception("Field value %s is not in %s" % (field, str(choices)))
class PlCoreBase(models.Model, PlModelMixIn):
objects = PlCoreBaseManager()
deleted_objects = PlCoreBaseDeletionManager()
# default values for created and updated are only there to keep evolution
# from failing.
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(default=timezone.now)
enacted = models.DateTimeField(null=True, blank=True, default=None)
policed = models.DateTimeField(null=True, blank=True, default=None)
# This is a scratchpad used by the Observer
backend_register = models.CharField(max_length=1024,
default="{}", null=True)
backend_status = models.CharField(max_length=1024,
default="0 - Provisioning in progress")
deleted = models.BooleanField(default=False)
write_protect = models.BooleanField(default=False)
lazy_blocked = models.BooleanField(default=False)
no_sync = models.BooleanField(default=False) # prevent object sync
no_policy = models.BooleanField(default=False) # prevent model_policy run
class Meta:
# Changing abstract to False would require the managers of subclasses of
# PlCoreBase to be customized individually.
abstract = True
app_label = "core"
def __init__(self, *args, **kwargs):
super(PlCoreBase, self).__init__(*args, **kwargs)
self._initial = self._dict # for PlModelMixIn
self.silent = False
def get_controller(self):
return self.controller
def can_update(self, user):
return user.can_update_root()
def delete(self, *args, **kwds):
# so we have something to give the observer
purge = kwds.get('purge',False)
if purge:
del kwds['purge']
silent = kwds.get('silent',False)
if silent:
del kwds['silent']
try:
purge = purge or observer_disabled
except NameError:
pass
if (purge):
super(PlCoreBase, self).delete(*args, **kwds)
else:
if (not self.write_protect):
self.deleted = True
self.enacted=None
self.policed=None
self.save(update_fields=['enacted','deleted','policed'], silent=silent)
def save(self, *args, **kwargs):
# let the user specify silence as either a kwarg or an instance variable
silent = self.silent
if "silent" in kwargs:
silent=silent or kwargs.pop("silent")
# SMBAKER: if an object is trying to delete itself, or if the observer
# is updating an object's backend_* fields, then let it slip past the
# composite key check.
ignore_composite_key_check=False
if "update_fields" in kwargs:
ignore_composite_key_check=True
for field in kwargs["update_fields"]:
if not (field in ["backend_register", "backend_status", "deleted", "enacted", "updated"]):
ignore_composite_key_check=False
if 'synchronizer' not in threading.current_thread().name:
self.updated = timezone.now()
super(PlCoreBase, self).save(*args, **kwargs)
# This is a no-op if observer_disabled is set
# if not silent:
# notify_observer()
self._initial = self._dict
def save_by_user(self, user, *args, **kwds):
if not self.can_update(user):
if getattr(self, "_cant_update_fieldName", None) is not None:
raise PermissionDenied("You do not have permission to update field %s on object %s" % (self._cant_update_fieldName, self.__class__.__name__))
else:
raise PermissionDenied("You do not have permission to update %s objects" % self.__class__.__name__)
self.save(*args, **kwds)
def delete_by_user(self, user, *args, **kwds):
if not self.can_update(user):
raise PermissionDenied("You do not have permission to delete %s objects" % self.__class__.__name__)
self.delete(*args, **kwds)
@classmethod
def select_by_user(cls, user):
# This should be overridden by descendant classes that want to perform
# filtering of visible objects by user.
return cls.objects.all()
@classmethod
def is_ephemeral(cls):
return cls in ephemeral_models
def tologdict(self):
try:
d = {'model_name':self.__class__.__name__, 'pk': self.pk}
except:
d = {}
return d
|
|
# extract-snippets.py v1.2.0 6/24/2021
# Jerry Kindall, Amazon Web Services
# extracts tagged regions from source files and writes them to a snippets directory.
# reads list of paths from stdin and extracts snippets from these files. Takes the
# directory to which snippets should be extracted as a command line argument. The
# second command line argument is optional and specifies the YAML file that contains
# a map of filename extensions to comment markers (default: snippet-extensions.yml
# in the same directory as this script)
# examples:
#
# extract snippets from last commit on current git branch to /tmp
# git diff @^ --name-only | python3 extract-snippets.py /tmp
#
# extract snippets from all files to specified directory
# find . -type f | python3 extract-snippets.py /path/to/snippets/dir
#
# extract snippets from all files in current dir to current dir,
# specifying a different filename map
# ls | python3 extract-snippets.py . snippet-extensions-more.yml
# The same snippet can be extracted from more than one source file ONLY if all
# source files containing the snippet have the same filename and contents.
# this is to support e.g. Lambda functions deployed by a CDK example, where the
# CDK app is provided in multiple languages but the same Lambda function source
# code (snippet tags included) is used in each version. Ideally the snippet tags
# would be removed from all but one of the Lambda source files... ideally.
# This script also presents an index mapping snippet names back to the files
# they come from, and a list of source files having problems.
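# For example, a hypothetical Python source file tagged like this (not part of
# any repository):
#
#     # snippet-start:[demo.python.hello]
#     def hello():
#         print("hello")
#     # snippet-end:[demo.python.hello]
#
# produces demo.python.hello.txt in the snippet directory, containing only the
# two lines between the tags. An optional number after the closing bracket on
# snippet-start (e.g. "# snippet-start:[demo.python.hello] 4") dedents each
# extracted line by that many spaces.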
import sys, os, io, yaml, re, functools
# all open() calls have an implied encoding parameter, UTF-8 by default
open = functools.partial(__builtins__.open,
encoding=os.environ.get("SOURCE_ENCODING", "utf8"))
# some constants to make our lives easier
TAB = "\t"
EOL = "\n"
# regular expression for matching dedent specifier: 1 or 2 digits
DIGITS = re.compile("[0-9][0-9]?")
# returns cached contents of a file if it exists, or reads it into the cache and
# returns it if not. cache is stored as a default parameter value.
#
# the cache is used only when there are duplicate snippets in two or more source files.
# only one copy of the file is ever cached (the first one that was found) so this shouldn't
# run up memory too much if you don't have many duplicate snippets.
def cached(path, cache={}):
if path not in cache:
with open(path) as infile:
cache[path] = infile.read().rstrip()
return cache[path]
# a file-like object used to avoid writing duplicate snippets we've already extracted
# in situations where this is not an error
class DummyFile:
def __init__(self, *args, **kwargs):
pass
def write(self, text):
pass
def close(self):
pass
# auto-vivifying dict (like DefaultDict but we don't need to import it)
class AutoDict(dict):
def __init__(self, T):
self.T = T
def __missing__(self, key):
self[key] = self.T()
return self[key]
# the class that does the snippet extraction. instantiate it passing the directory to
# which snippets should be extracted. call the instance with each source file.
class Snipper:
# initialize Snipper
def __init__(self, snippetdir):
self.dir = snippetdir # directory where snippets will be extracted
self.source = {} # source file of each snippet
self.count = 0 # number of snippets extracted
self.errors = 0 # processing errors
self.issues = AutoDict(set) # files with issues
self.index = AutoDict(list) # index of snippets to files (this should probably be merged with self.source)
self.log = io.StringIO()
# if used as context manager, we capture the log instead of printing it as we go
# by switching print() to print to a StringIO object
def __enter__(self):
global print
print = functools.partial(__builtins__.print, file=self.log)
return self
def __exit__(self, *args):
global print
print = __builtins__.print
# extract snippets from a single file
def __call__(self, path, markers):
print(path)
self.started = set() # snippets we've started in this source file
self.duplicates = set() # snippets we've determined are duplicates so we won't append/echo
tag = re.compile(f" *({'|'.join(markers)}) ?snippet-") # e.g. if ext is "// #" end up with regex: " *(#|//) ?snippet-"
self.files = {} # files currently open to write snippets
self.dedent = {} # amount of whitespace to strip from each line of snippet
self.path = path # source file we are working with (store it on instance so we can use it in error messages)
self.markers = markers
try:
with open(path) as infile: # read source file entirely into memory
self.text = infile.read().rstrip()
except IOError as ex:
print("ERROR reading file", ex)
self.errors += 1
return
if TAB in self.text and "snippet-start" in self.text:
print(" WARNING tab(s) found in %s may cause formatting problems in docs" % path)
# process each line in source file. self.i is the line we're on (for error messages)
for self.i, self.line in enumerate(self.text.splitlines(keepends=False), start=1):
line = self.line # use a local variable for a bit more performance
if tag.match(line): # line is a snippet directive, parse and process it
self.directive = line.split("snippet-")[1].split(":")[0].rstrip() # get e.g. append from snippet-append
self.arg = line.split("[")[1].split("]")[0].rstrip() # get e.g. snippet-name from [snippet-name]
func = getattr(self, self.directive.lstrip("_"), None)
if func and callable(func):
func(self.arg) # call our method named same as directive (e.g. start(..) for snippet-start)
else:
print(" ERROR invalid directive snippet-%s at %s in %s" % (self.directive, self.i, self.path))
self.errors += 1
self.issues[path].add("invalid directive snippet-%s" % self.directive)
else: # line is NOT a snippet directive. write it to any open snippet files
for snip, file in self.files.items(): # for each snippet file we're writing, write the line
dedent = self.dedent[snip]
if dedent and line[:dedent].strip(): # is the text we want to strip to dedent all whitespace? error if not
print((" ERROR unable to dedent %s space(s) " % dedent) +
("in snippet %s at line %s in %s " % self._where) +
f"(only indented {len(line) - len(line.lstrip())} spaces)")
self.errors += 1
file.write(line[dedent:].rstrip() + EOL) # write it (strip whitespace at end just to be neat)
# done processing this file. make sure all snippets had snippet-end tags
for snip, file in self.files.items():
print(" ERROR snippet-end tag for %s missing in %s, extracted to end of file" % (snip, path))
file.close()
self.issues[path].add("snippet-end tag for %s missing" % snip)
self.errors += 1
# directive: beginning of snippet
def start(self, arg):
path = os.path.join(self.dir, f"{arg}.txt")
indicator = "EXTRACT"
opener = open
printer = print
if arg in self.files:
printer = lambda *a: print(" ERROR snippet %s already open at line %s in %s" % self._where)
self.issues[self.path].add("snippet %s opened multiple times")
self.errors += 1
elif os.path.isfile(path):
# if snippet output already exists, this is OK only if its source file has the same name and identical content
if self.path != self.source[arg] and self.path.rpartition("/")[2] == self.source[arg].rpartition("/")[2] and self.text == cached(self.source[arg]):
printer = lambda *a: print("WARNING redundant snippet %s at line %s in %s" % self._where)
self.duplicates.add(arg)
else:
printer = lambda *a: print(" ERROR duplicate snippet %s at line %s in %s" % self._where,
"(also in %s)" % self.source[arg])
pfxlen = len(os.path.commonprefix([self.path, self.source[arg]]))
path1 = self.source[arg][pfxlen:]
if "/" not in path1: path1 = self.source[arg]
path2 = self.path[pfxlen:]
if "/" not in path2: path2 = self.path
self.issues[self.path].add("%s also declared in %s" % (arg, path1))
self.issues[self.source[arg]].add("%s also declared in %s" % (arg, path2))
self.errors += 1
opener = DummyFile # don't write to the file, but still track it so we can detect missing snippet-end
else:
self.count += 1
# parse number at end of line as dedent value
self.dedent[arg] = int(DIGITS.search(self.line.rpartition("]")[2] + " 0").group(0))
self.files[arg] = opener(path, "w") # open real file or dummy
self.index[arg].append(self.path)
self.started.add(arg) # record that we started this snippet in this source file
if arg not in self.source: # record that we *first* saw this snippet in this source file
self.source[arg] = self.path
printer(" ", indicator, arg)
# directive: append to given file (for extracting multiple chunks of code to a single snippet)
def append(self, arg):
if arg in self.files: # is the file already open?
print(" ERROR snippet %s already open at line %s in %s" % self._where)
self.issues[self.path].add("snippet %s opened multiple times" % arg)
self.errors += 1
return
if arg not in self.started: # did we start this snippet in current source file?
print(" ERROR snippet file %s not found at line %s in %s" % self._where)
self.issues[self.path].add("snippet %s doesn't exist" % arg)
self.errors += 1
return
self.files[arg] = DummyFile() if arg in self.duplicates else open(os.path.join(self.dir, arg) + ".txt", "a")
print(" APPEND", arg)
# directive: end of snippet
def end(self, arg):
if arg in self.files:
self.files[arg].close()
del self.files[arg]
else:
print(" ERROR snippet file %s not open at %s in %s" % self._where)
self.issues[self.path].add("snippet-end tag for %s which is not open" % arg)
self.errors += 1
# directive: insert arg verbatim as a line into all currently open snippets
# useful for e.g. adding closing brackets to partial code block (could also use append for that)
def echo(self, arg):
arg = arg.rstrip() + EOL
if self.files:
for file in self.files.values():
file.write(arg)
else:
print(" ERROR echo '%s' outside snippet at %s in %s" % self._where)
self.issues[self.path].add("echo outside snippet")
self.errors += 1
# do-nothing handler used for directives that we ignore
def _nop(self, arg): return
# the aforementioned ignored directives
service = comment = keyword = sourceauthor = sourcedate = sourcedescription = sourcetype = sourcesyntax = _nop
# convenience property for returning error location tuple (used in error messages)
@property
def _where(self):
return self.arg, self.i, self.path
def err_exit(msg):
print("ERROR", msg)
sys.exit(1)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
# read list of filenames from stdin first, so we don't get broken pipe if we error out
stdin_lines = []
if not sys.stdin.isatty():
stdin_lines = sys.stdin.readlines()
# get output directory from command line, or error
if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
snippetdir = sys.argv[1]
else:
err_exit("snippet output directory not passed or does not exist")
# get filename of extensions list from command line, or use default, then load it
if len(sys.argv) > 2:
commentfile = sys.argv[2]
else:
commentfile = "snippet-extensions.yml"
# reports to be printed can be passed in via environment variable REPORTS
# if this value is not set, print all reports
reports = os.environ.get("REPORTS", "log issues index").lower().split()
# if no directory specified, file is in same directory as script
if "/" not in commentfile and "\\" not in commentfile:
commentfile = os.path.join(os.path.dirname(__file__), commentfile)
if not os.path.isfile(commentfile):
err_exit("source file extension map %s not found" % commentfile)
with open(commentfile) as comments:
MAP_EXT_MARKER = yaml.safe_load(comments)
if not isinstance(MAP_EXT_MARKER, dict):
err_exit("source map is not a key-value store (dictionary)")
for k, v in MAP_EXT_MARKER.items():
if isinstance(k, str) and isinstance(v, str):
MAP_EXT_MARKER[k] = v.split()
else:
err_exit("key, value must both be strings; got %s, %s (%s, %s)" %
(k, v, type(k).__name__, type(v).__name__))
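# For reference, a minimal snippet-extensions.yml compatible with the loading
# code above might look like the following (an assumption about the intended
# format: each key is a file extension or filename suffix, each value is a
# whitespace-separated list of comment markers):
#
#   .py: "#"
#   .js: "//"
#   /Makefile: "#"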
print("==== extracting snippets in source files",
" ".join(ex for ex in MAP_EXT_MARKER if ex and MAP_EXT_MARKER[ex]), "\n")
print("reports:", " ".join(reports).upper(), end="\n\n")
# initialize snipper instance and our counters
with Snipper(snippetdir) as snipper:
seen = processed = 0
# main loop: for each file named on stdin, check to see if we should process it, and if so, do so
for path in sorted(stdin_lines):
path = path.strip()
if not path: # skip blank lines in input
continue
# make sure relative path starts with ./ so that e.g. /Makefile in the extensions map
# can be used to match an entire filename.
if not (path.startswith(("./", "/", "\\")) or # already relative or Linux/Mac absolute path or UNC path
(path[0].isalpha() and path[1] == ":")): # already Windows absolute path
path = "./" + path
if "/." in path or "\\." in path: # skip hidden file or directory
continue
seen += 1 # count files seen (not hidden)
# find first extension from extension map that matches current file
# replace backslashes with forward slashes for purposes of matching so it works with Windows or UNC paths
ext = next((ext for ext in MAP_EXT_MARKER if path.replace("\\", "/").endswith(ext)), None)
markers = MAP_EXT_MARKER.get(ext, ())
if markers: # process it if we know its comment markers
snipper(path, markers)
processed += 1
# files with issues report (files with most issues first)
if "issues" in reports:
if snipper.issues:
print("====", len(snipper.issues), "file(s) with issues:", end="\n\n")
for issue, details in sorted(snipper.issues.items(), key=lambda item: -len(item[1])):
print(issue, end="\n ")
print(*sorted(details), sep="\n ", end="\n\n")
else:
print("---- no issues found\n")
# snippet index report (snippets that appear in the most files first)
if "index" in reports:
if snipper.index:
print("====", len(snipper.index), "snippet(s) extracted from", processed, "files:", end="\n\n")
for snippet, files in sorted(snipper.index.items(), key=lambda item: -len(item[1])):
print(snippet, "declared in:", end="\n ")
print(*sorted(files), sep="\n ", end="\n\n")
else:
print("--- no snippets were extracted\n")
# print log
if "log" in reports:
print("==== Complete processing log\n")
if processed:
print(snipper.log.getvalue(), end="\n\n")
else:
print("No files were processed\n")
# print summary
print("====", snipper.count, "snippet(s) extracted from", processed,
"source file(s) processed of", seen, "candidate(s) with", snipper.errors,
"error(s) in", len(snipper.issues), "file(s)\n")
# exit with nonzero status if we found any errors, so caller won't commit the snippets
sys.exit(snipper.errors > 0)
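# Usage sketch (not part of the script; the script filename and directories are
# placeholders): pipe a file list on stdin, pass the snippet output directory as
# the first argument and, optionally, an extension map as the second. REPORTS
# restricts which reports are printed, and a nonzero exit status means errors
# were found, so a CI job can refuse to publish the extracted snippets.
#
#   git ls-files | REPORTS="issues index" python extract_snippets.py build/snippets/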
|
|
from gitcd.git.server import GitServer
from gitcd.git.branch import Branch
from gitcd.exceptions import GitcdGithubApiException
import requests
class Gitlab(GitServer):
tokenSpace = 'gitlab'
baseUrl = 'https://gitlab.com/api/v4'
def open(
self,
title: str,
body: str,
fromBranch: Branch,
toBranch: Branch
) -> bool:
token = self.configPersonal.getToken(self.tokenSpace)
if token is not None:
projectId = '%s%s%s' % (
self.remote.getUsername(),
'%2F',
self.remote.getRepositoryName()
)
url = '%s/projects/%s/merge_requests' % (
self.baseUrl,
projectId
)
data = {
'source_branch': fromBranch.getName(),
'target_branch': toBranch.getName(),
'title': title,
'description': body
}
headers = {'Private-Token': token}
response = requests.post(
url,
headers=headers,
json=data
)
if response.status_code == 401:
raise GitcdGithubApiException(
"Authentication failed, create a new app password."
)
if response.status_code == 409:
raise GitcdGithubApiException(
"This pull-requests already exists."
)
# anything else but success
if response.status_code != 201:
raise GitcdGithubApiException(
"Open a pull request on gitlab failed."
)
try:
result = response.json()
defaultBrowser = self.getDefaultBrowserCommand()
self.cli.execute("%s %s" % (
defaultBrowser,
result['web_url']
))
except ValueError:
raise GitcdGithubApiException(
"Open a pull request on gitlab failed."
)
else:
defaultBrowser = self.getDefaultBrowserCommand()
self.cli.execute("%s %s" % (
defaultBrowser,
"%s/%s/%s/merge_requests/new?%s=%s" % (
"https://gitlab.com",
self.remote.getUsername(),
self.remote.getRepositoryName(),
'merge_request%5Bsource_branch%5D',
fromBranch.getName()
)
))
return True
def status(self, branch: Branch):
master = Branch(self.config.getMaster())
token = self.configPersonal.getToken(self.tokenSpace)
if token is not None:
data = {
'state': 'opened',
'source_branch': branch.getName(),
'target_branch': master.getName()
}
projectId = '%s%s%s' % (
self.remote.getUsername(),
'%2F',
self.remote.getRepositoryName()
)
baseUrl = "%s/projects/%s/merge_requests" % (
self.baseUrl,
projectId
)
url = "%s?state=opened" % (
baseUrl
)
headers = {'Private-Token': token}
response = requests.get(
url,
headers=headers,
json=data
)
if response.status_code != 200:
raise GitcdGithubApiException(
"Could not fetch open pull requests," +
" please have a look manually."
)
returnValue = {}
result = response.json()
if len(result) > 0:
returnValue['state'] = 'REVIEW REQUIRED'
reviewers = self.isReviewedBy(
"%s/%s/approvals" % (
baseUrl,
result[0]['iid']
)
)
if len(reviewers) == 0 and result[0]['user_notes_count'] > 0:
reviewers = self.getLgtmComments(
"%s/%s/notes" % (
baseUrl,
result[0]['iid']
)
)
if len(reviewers) > 0:
returnValue['state'] = 'APPROVED'
for reviewer in reviewers:
reviewer = reviewers[reviewer]
if reviewer['state'] != 'APPROVED':
returnValue['state'] = reviewer['state']
returnValue['master'] = master.getName()
returnValue['feature'] = branch.getName()
returnValue['reviews'] = reviewers
returnValue['url'] = result[0]['web_url']
returnValue['number'] = result[0]['iid']
return returnValue
def isReviewedBy(self, activityUrl: str) -> dict:
reviewers = {}
token = self.configPersonal.getToken(self.tokenSpace)
if token is not None:
headers = {'Private-Token': token}
response = requests.get(
activityUrl,
headers=headers
)
if response.status_code != 200:
raise GitcdGithubApiException(
"Fetch PR activity for gitlab failed."
)
result = response.json()
if 'approved_by' in result and len(result['approved_by']) > 0:
for approver in result['approved_by']:
reviewer = {}
reviewer['comments'] = []
comment = {}
comment['body'] = 'approved'
comment['state'] = 'APPROVED'
reviewer['state'] = 'APPROVED'
reviewer['comments'].append(comment)
reviewers[approver['user']['username']] = reviewer
return reviewers
def getLgtmComments(self, commentsUrl):
token = self.configPersonal.getToken(self.tokenSpace)
reviewers = {}
if token is not None:
headers = {'Private-Token': token}
response = requests.get(
commentsUrl,
headers=headers
)
if response.status_code != 200:
raise GitcdGithubApiException(
"Fetch PR comments for bitbucket failed."
)
comments = response.json()
if len(comments) > 0:
for comment in comments:
if (
'body' in comment and
'lgtm' in comment['body'].lower()
):
if comment['author']['username'] in reviewers:
reviewer = reviewers[comment['author']['username']]
else:
reviewer = {}
reviewer['comments'] = []
reviewer['state'] = 'APPROVED'
reviewerComment = {}
reviewerComment['state'] = 'APPROVED'
reviewerComment['body'] = comment['body']
reviewer['comments'].append(reviewerComment)
reviewers[comment['author']['username']] = reviewer
return reviewers
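# Usage sketch (hypothetical wiring; gitcd normally constructs the server object
# itself and injects the remote, config and cli helpers, so this is illustrative
# rather than the project's documented API):
#
#     gitlab = Gitlab()
#     gitlab.open(
#         title='Add feature X',
#         body='Implements feature X',
#         fromBranch=Branch('feature/x'),
#         toBranch=Branch('master'),
#     )
#     print(gitlab.status(Branch('feature/x')))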
|
|
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
import sys
from typing import List, Tuple
from pylint.constants import (
_SCOPE_EXEMPT,
MAIN_CHECKER_NAME,
MSG_STATE_CONFIDENCE,
MSG_STATE_SCOPE_CONFIG,
MSG_STATE_SCOPE_MODULE,
MSG_TYPES,
MSG_TYPES_LONG,
MSG_TYPES_STATUS,
WarningScope,
)
from pylint.exceptions import InvalidMessageError, UnknownMessageError
from pylint.interfaces import UNDEFINED
from pylint.message.message import Message
from pylint.utils import get_module_and_frameid, get_rst_section, get_rst_title
class MessagesHandlerMixIn:
"""A mix-in class containing all the messages related methods for the main lint class."""
__by_id_managed_msgs: List[Tuple[str, str, str, int, bool]] = []
def __init__(self):
self._msgs_state = {}
self.msg_status = 0
def _checker_messages(self, checker):
for known_checker in self._checkers[checker.lower()]:
yield from known_checker.msgs
@classmethod
def clear_by_id_managed_msgs(cls):
cls.__by_id_managed_msgs.clear()
@classmethod
def get_by_id_managed_msgs(cls):
return cls.__by_id_managed_msgs
def _register_by_id_managed_msg(self, msgid_or_symbol: str, line, is_disabled=True):
"""If the msgid is a numeric one, then register it to inform the user
that it could use the symbolic msgid instead."""
if msgid_or_symbol[1:].isdigit():
try:
symbol = self.msgs_store.message_id_store.get_symbol(msgid=msgid_or_symbol) # type: ignore
except UnknownMessageError:
return
managed = (self.current_name, msgid_or_symbol, symbol, line, is_disabled) # type: ignore
MessagesHandlerMixIn.__by_id_managed_msgs.append(managed)
def disable(self, msgid, scope="package", line=None, ignore_unknown=False):
self._set_msg_status(
msgid, enable=False, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line)
def enable(self, msgid, scope="package", line=None, ignore_unknown=False):
self._set_msg_status(
msgid, enable=True, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line, is_disabled=False)
def _set_msg_status(
self, msgid, enable, scope="package", line=None, ignore_unknown=False
):
assert scope in ("package", "module")
if msgid == "all":
for _msgid in MSG_TYPES:
self._set_msg_status(_msgid, enable, scope, line, ignore_unknown)
if enable and not self._python3_porting_mode:
# Don't activate the python 3 porting checker if it wasn't activated explicitly.
self.disable("python3")
return
# msgid is a category?
category_id = msgid.upper()
if category_id not in MSG_TYPES:
category_id = MSG_TYPES_LONG.get(category_id)
if category_id is not None:
for _msgid in self.msgs_store._msgs_by_category.get(category_id):
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is a checker name?
if msgid.lower() in self._checkers:
for checker in self._checkers[msgid.lower()]:
for _msgid in checker.msgs:
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is report id?
if msgid.lower().startswith("rp"):
if enable:
self.enable_report(msgid)
else:
self.disable_report(msgid)
return
try:
# msgid is a symbolic or numeric msgid.
message_definitions = self.msgs_store.get_message_definitions(msgid)
except UnknownMessageError:
if ignore_unknown:
return
raise
for message_definition in message_definitions:
self._set_one_msg_status(scope, message_definition, line, enable)
def _set_one_msg_status(self, scope, msg, line, enable):
if scope == "module":
self.file_state.set_msg_status(msg, line, enable)
if not enable and msg.symbol != "locally-disabled":
self.add_message(
"locally-disabled", line=line, args=(msg.symbol, msg.msgid)
)
else:
msgs = self._msgs_state
msgs[msg.msgid] = enable
# sync configuration object
self.config.enable = [
self._message_symbol(mid) for mid, val in sorted(msgs.items()) if val
]
self.config.disable = [
self._message_symbol(mid)
for mid, val in sorted(msgs.items())
if not val
]
def _message_symbol(self, msgid):
"""Get the message symbol of the given message id
Return the original message id if the message does not
exist.
"""
try:
return [md.symbol for md in self.msgs_store.get_message_definitions(msgid)]
except UnknownMessageError:
return msgid
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
"""Returns the scope at which a message was enabled/disabled."""
if self.config.confidence and confidence.name not in self.config.confidence:
return MSG_STATE_CONFIDENCE
try:
if line in self.file_state._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
return None
def is_message_enabled(self, msg_descr, line=None, confidence=None):
"""return true if the message associated to the given message id is
enabled
msgid may be either a numeric or symbolic message id.
"""
if self.config.confidence and confidence:
if confidence.name not in self.config.confidence:
return False
try:
message_definitions = self.msgs_store.get_message_definitions(msg_descr)
msgids = [md.msgid for md in message_definitions]
except UnknownMessageError:
# The linter checks for messages that are not registered
# due to version mismatch, just treat them as message IDs
# for now.
msgids = [msg_descr]
for msgid in msgids:
if self.is_one_message_enabled(msgid, line):
return True
return False
def is_one_message_enabled(self, msgid, line):
if line is None:
return self._msgs_state.get(msgid, True)
try:
return self.file_state._module_msgs_state[msgid][line]
except KeyError:
# Check if the message's line is after the maximum line existing in ast tree.
# This line won't appear in the ast tree and won't be referred to in
# self.file_state._module_msgs_state
# This happens for example with a commented line at the end of a module.
max_line_number = self.file_state.get_effective_max_line_number()
if max_line_number and line > max_line_number:
fallback = True
lines = self.file_state._raw_module_msgs_state.get(msgid, {})
# Doesn't consider scopes, as a disable can be in a different scope
# than that of the current line.
closest_lines = reversed(
[
(message_line, enable)
for message_line, enable in lines.items()
if message_line <= line
]
)
last_line, is_enabled = next(closest_lines, (None, None))
if last_line is not None:
fallback = is_enabled
return self._msgs_state.get(msgid, fallback)
return self._msgs_state.get(msgid, True)
def add_message(
self, msgid, line=None, node=None, args=None, confidence=None, col_offset=None
):
"""Adds a message given by ID or name.
If provided, the message string is expanded using args.
AST checkers must provide the node argument (but may optionally
provide line if the line number is different), raw and token checkers
must provide the line argument.
"""
if confidence is None:
confidence = UNDEFINED
message_definitions = self.msgs_store.get_message_definitions(msgid)
for message_definition in message_definitions:
self.add_one_message(
message_definition, line, node, args, confidence, col_offset
)
@staticmethod
def check_message_definition(message_definition, line, node):
if message_definition.msgid[0] not in _SCOPE_EXEMPT:
# Fatal messages and reports are special, the node/scope distinction
# does not apply to them.
if message_definition.scope == WarningScope.LINE:
if line is None:
raise InvalidMessageError(
"Message %s must provide line, got None"
% message_definition.msgid
)
if node is not None:
raise InvalidMessageError(
"Message %s must only provide line, "
"got line=%s, node=%s" % (message_definition.msgid, line, node)
)
elif message_definition.scope == WarningScope.NODE:
# Node-based warnings may provide an override line.
if node is None:
raise InvalidMessageError(
"Message %s must provide Node, got None"
% message_definition.msgid
)
def add_one_message(
self, message_definition, line, node, args, confidence, col_offset
):
self.check_message_definition(message_definition, line, node)
if line is None and node is not None:
line = node.fromlineno
if col_offset is None and hasattr(node, "col_offset"):
col_offset = node.col_offset
# should this message be displayed
if not self.is_message_enabled(message_definition.msgid, line, confidence):
self.file_state.handle_ignored_message(
self.get_message_state_scope(
message_definition.msgid, line, confidence
),
message_definition.msgid,
line,
node,
args,
confidence,
)
return
# update stats
msg_cat = MSG_TYPES[message_definition.msgid[0]]
self.msg_status |= MSG_TYPES_STATUS[message_definition.msgid[0]]
self.stats[msg_cat] += 1
self.stats["by_module"][self.current_name][msg_cat] += 1
try:
self.stats["by_msg"][message_definition.symbol] += 1
except KeyError:
self.stats["by_msg"][message_definition.symbol] = 1
# expand message ?
msg = message_definition.msg
if args:
msg %= args
# get module and object
if node is None:
module, obj = self.current_name, ""
abspath = self.current_file
else:
module, obj = get_module_and_frameid(node)
abspath = node.root().file
path = abspath.replace(self.reporter.path_strip_prefix, "", 1)
# add the message
self.reporter.handle_message(
Message(
message_definition.msgid,
message_definition.symbol,
(abspath, path, module, obj, line or 1, col_offset or 0),
msg,
confidence,
)
)
def _get_checkers_infos(self):
by_checker = {}
for checker in self.get_checkers():
name = checker.name
if name != "master":
try:
by_checker[name]["checker"] = checker
by_checker[name]["options"] += checker.options_and_values()
by_checker[name]["msgs"].update(checker.msgs)
by_checker[name]["reports"] += checker.reports
except KeyError:
by_checker[name] = {
"checker": checker,
"options": list(checker.options_and_values()),
"msgs": dict(checker.msgs),
"reports": list(checker.reports),
}
return by_checker
def get_checkers_documentation(self):
result = get_rst_title("Pylint global options and switches", "-")
result += """
Pylint provides global options and switches.
"""
for checker in self.get_checkers():
name = checker.name
if name == MAIN_CHECKER_NAME:
if checker.options:
for section, options in checker.options_by_section():
if section is None:
title = "General options"
else:
title = "%s options" % section.capitalize()
result += get_rst_title(title, "~")
result += "%s\n" % get_rst_section(None, options)
result += get_rst_title("Pylint checkers' options and switches", "-")
result += """\
Pylint checkers can provide three sets of features:
* options that control their execution,
* messages that they can raise,
* reports that they can generate.
Below is a list of all checkers and their features.
"""
by_checker = self._get_checkers_infos()
for checker in sorted(by_checker):
information = by_checker[checker]
checker = information["checker"]
del information["checker"]
result += checker.get_full_documentation(**information)
return result
def print_full_documentation(self, stream=None):
"""output a full documentation in ReST format"""
if not stream:
stream = sys.stdout
print(self.get_checkers_documentation()[:-1], file=stream)
@staticmethod
def _print_checker_doc(information, stream=None):
"""Helper method for print_full_documentation.
Also used by doc/exts/pylint_extensions.py.
"""
if not stream:
stream = sys.stdout
checker = information["checker"]
del information["checker"]
print(checker.get_full_documentation(**information)[:-1], file=stream)
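# Minimal sketch of how the mix-in above is usually reached, via a PyLinter
# instance that inherits it (message names are examples only; plugin loading
# details are simplified):
#
#     from pylint.lint import PyLinter
#     linter = PyLinter()
#     linter.load_default_plugins()
#     linter.disable("C")                         # a whole message category
#     linter.enable("unused-import")              # a single symbolic msgid
#     print(linter.is_message_enabled("unused-import"))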
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 Edgewall Software
# Copyright (C) 2008 Stephen Hansen
# Copyright (C) 2009-2010 Robert Corsaro
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from operator import itemgetter
from pkg_resources import resource_filename
from trac.core import Component, implements, ExtensionPoint
from trac.notification.api import (INotificationDistributor,
INotificationFormatter,
INotificationSubscriber,
NotificationSystem)
from trac.notification.model import Subscription
from trac.prefs.api import IPreferencePanelProvider
from trac.util import as_int
from trac.util.html import tag
from trac.util.translation import _, cleandoc_
from trac.web.chrome import Chrome, ITemplateProvider, add_notice
from trac.web.session import get_session_attribute
from trac.wiki.macros import WikiMacroBase
def get_preferred_format(env, sid, authenticated, transport):
return get_session_attribute(env, sid, authenticated,
'notification.format.%s' % transport)
class NotificationPreferences(Component):
implements(IPreferencePanelProvider, ITemplateProvider)
subscribers = ExtensionPoint(INotificationSubscriber)
distributors = ExtensionPoint(INotificationDistributor)
formatters = ExtensionPoint(INotificationFormatter)
def __init__(self):
self.post_handlers = {
'add-rule': self._add_rule,
'delete-rule': self._delete_rule,
'move-rule': self._move_rule,
'set-format': self._set_format,
'replace': self._replace_rules,
}
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield ('notification', _('Notifications'))
def render_preference_panel(self, req, panel, path_info=None):
if req.method == 'POST':
action_arg = req.args.getfirst('action', '').split('_', 1)
if len(action_arg) == 2:
action, arg = action_arg
handler = self.post_handlers.get(action)
if handler:
handler(arg, req)
add_notice(req, _("Your preferences have been saved."))
req.redirect(req.href.prefs('notification'))
rules = {}
subscribers = []
formatters = {}
selected_format = {}
defaults = []
for i in self.subscribers:
description = i.description()
if not description:
continue
if not req.session.authenticated and i.requires_authentication():
continue
subscribers.append({'class': i.__class__.__name__,
'description': description})
if hasattr(i, 'default_subscriptions'):
defaults.extend(i.default_subscriptions())
desc_map = dict((s['class'], s['description']) for s in subscribers)
for t in self._iter_transports():
rules[t] = []
formatters[t] = self._get_supported_styles(t)
selected_format[t] = req.session.get('notification.format.%s' % t)
for r in self._iter_rules(req, t):
description = desc_map.get(r['class'])
if description:
values = {'description': description}
values.update((key, r[key]) for key
in ('id', 'adverb', 'class',
'priority'))
rules[t].append(values)
default_rules = {}
for r in sorted(defaults, key=itemgetter(3)): # sort by priority
klass, dist, format, priority, adverb = r
default_rules.setdefault(dist, [])
description = desc_map.get(klass)
if description:
default_rules[dist].append({'adverb': adverb,
'description': description})
data = {'rules': rules, 'subscribers': subscribers,
'formatters': formatters, 'selected_format': selected_format,
'default_rules': default_rules,
'adverbs': ('always', 'never'),
'adverb_labels': {'always': _("Notify"),
'never': _("Never notify")}}
Chrome(self.env).add_jquery_ui(req)
return 'prefs_notification.html', dict(data=data)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
resource_dir = resource_filename('trac.notification', 'templates')
return [resource_dir]
# Internal methods
def _add_rule(self, arg, req):
rule = Subscription(self.env)
rule['sid'] = req.session.sid
rule['authenticated'] = 1 if req.session.authenticated else 0
rule['distributor'] = arg
rule['format'] = req.args.get('format-%s' % arg, '')
rule['adverb'] = req.args['new-adverb-%s' % arg]
rule['class'] = req.args['new-rule-%s' % arg]
Subscription.add(self.env, rule)
def _delete_rule(self, arg, req):
session = req.session
Subscription.delete(self.env, arg, session.sid, session.authenticated)
def _move_rule(self, arg, req):
tokens = [as_int(val, 0) for val in arg.split('-', 1)]
if len(tokens) == 2:
rule_id, priority = tokens
if rule_id > 0 and priority > 0:
session = req.session
Subscription.move(self.env, rule_id, priority, session.sid,
session.authenticated)
def _set_format(self, arg, req):
format_ = req.args.getfirst('format-%s' % arg)
format_ = self._normalize_format(format_, arg)
req.session.set('notification.format.%s' % arg, format_, '')
Subscription.update_format_by_distributor_and_sid(
self.env, arg, req.session.sid, req.session.authenticated, format_)
def _replace_rules(self, arg, req):
subscriptions = []
for transport in self._iter_transports():
format_ = req.args.getfirst('format-' + transport)
format_ = self._normalize_format(format_, transport)
req.session.set('notification.format.%s' % transport, format_, '')
adverbs = req.args.getlist('adverb-' + transport)
classes = req.args.getlist('class-' + transport)
for idx in xrange(min(len(adverbs), len(classes))):
subscriptions.append({'distributor': transport,
'format': format_,
'adverb': adverbs[idx],
'class': classes[idx]})
sid = req.session.sid
authenticated = req.session.authenticated
with self.env.db_transaction:
Subscription.replace_all(self.env, sid, authenticated,
subscriptions)
def _iter_rules(self, req, transport):
session = req.session
for r in Subscription.find_by_sid_and_distributor(
self.env, session.sid, session.authenticated, transport):
yield r
def _iter_transports(self):
for distributor in self.distributors:
for transport in distributor.transports():
yield transport
def _get_supported_styles(self, transport):
styles = set()
for formatter in self.formatters:
for style, realm_ in formatter.get_supported_styles(transport):
styles.add(style)
return sorted(styles)
def _normalize_format(self, format_, transport):
if format_:
styles = self._get_supported_styles(transport)
if format_ in styles:
return format_
return ''
class SubscriberListMacro(WikiMacroBase):
_domain = 'messages'
_description = cleandoc_(
"""Display a list of all installed notification subscribers, including
documentation if available.
Optionally, the name of a specific subscriber can be provided as an
argument. In that case, only the documentation for that subscriber will
be rendered.
Note that this macro will not be able to display the documentation of
subscribers if the `PythonOptimize` option is enabled for mod_python!
""")
def expand_macro(self, formatter, name, content):
content = content.strip() if content else ''
name_filter = content.strip('*')
items = {}
for subscriber in NotificationSystem(self.env).subscribers:
name = subscriber.__class__.__name__
if not name_filter or name.startswith(name_filter):
items[name] = subscriber.description()
return tag.div(class_='trac-subscriberlist')(
tag.table(class_='wiki')(
tag.thead(tag.tr(
tag.th(_("Subscriber")),
tag.th(_("Description")))),
tag.tbody(
tag.tr(tag.td(tag.code(name)),
tag.td(items[name]),
class_='odd' if idx % 2 else 'even')
for idx, name in enumerate(sorted(items)))))
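# Wiki usage sketch for the macro above (the macro name is derived from the
# class name by WikiMacroBase; the optional argument filters subscribers by
# name prefix, as implemented in expand_macro):
#
#     [[SubscriberList]]
#     [[SubscriberList(Ticket)]]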
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It can download the necessary training data,
and runs with reasonable defaults to train within a few hours even using only a
CPU. For more information see http://tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other >
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = map(int, FLAGS.how_many_training_steps.split(','))
learning_rates_list = map(float, FLAGS.learning_rate.split(','))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
fingerprint_input = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.placeholder(
tf.float32, [None, label_count], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits))
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
train_step = tf.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
predicted_indices = tf.argmax(logits, 1)
expected_indices = tf.argmax(ground_truth_input, 1)
correct_prediction = tf.equal(predicted_indices, expected_indices)
confusion_matrix = tf.confusion_matrix(expected_indices, predicted_indices)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
start_step = global_step.eval(session=sess)
tf.logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
FLAGS.model_architecture + '.pbtxt')
# Save list of words.
with gfile.GFile(
os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
'w') as f:
f.write('\n'.join(audio_processor.words_list))
# Training loop.
training_steps_max = np.sum(training_steps_list)
for training_step in xrange(start_step, training_steps_max + 1):
# Figure out what the current learning rate is.
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
FLAGS.background_volume, time_shift_samples, 'training', sess)
# Run the graph with this batch of training data.
train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
[
merged_summaries, evaluation_step, cross_entropy_mean, train_step,
increment_global_step
],
feed_dict={
fingerprint_input: train_fingerprints,
ground_truth_input: train_ground_truth,
learning_rate_input: learning_rate_value,
dropout_prob: 0.5
})
train_writer.add_summary(train_summary, training_step)
tf.logging.info('Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
(training_step, learning_rate_value, train_accuracy * 100,
cross_entropy_value))
is_last_step = (training_step == training_steps_max)
if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
set_size = audio_processor.set_size('validation')
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
validation_fingerprints, validation_ground_truth = (
audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
0.0, 0, 'validation', sess))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy, conf_matrix = sess.run(
[merged_summaries, evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: validation_fingerprints,
ground_truth_input: validation_ground_truth,
dropout_prob: 1.0
})
validation_writer.add_summary(validation_summary, training_step)
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (validation_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Step %d: Validation accuracy = %.1f%% (N=%d)' %
(training_step, total_accuracy * 100, set_size))
# Save the model checkpoint periodically.
if (training_step % FLAGS.save_step_interval == 0 or
training_step == training_steps_max):
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.model_architecture + '.ckpt')
tf.logging.info('Saving to "%s-%d"', checkpoint_path, training_step)
saver.save(sess, checkpoint_path, global_step=training_step)
set_size = audio_processor.set_size('testing')
tf.logging.info('set_size=%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
test_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: test_fingerprints,
ground_truth_input: test_ground_truth,
dropout_prob: 1.0
})
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (test_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (total_accuracy * 100,
set_size))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--time_shift_ms',
type=float,
default=100.0,
help="""\
Range to randomly shift the training audio by in time.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=20.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--how_many_training_steps',
type=str,
default='15000,3000',
help='How many training loops to run',)
parser.add_argument(
'--eval_step_interval',
type=int,
default=400,
help='How often to evaluate the training results.')
parser.add_argument(
'--learning_rate',
type=str,
default='0.001,0.0001',
help='How large a learning rate to use when training.')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
parser.add_argument(
'--save_step_interval',
type=int,
default=100,
help='Save model checkpoint every save_steps.')
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--check_nans',
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
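# Example invocation outside bazel (a sketch; paths and word lists are
# placeholders, and every flag corresponds to an argparse definition above):
#
#   python train.py --data_dir=/tmp/my_wavs --wanted_words=up,down \
#       --how_many_training_steps=3000,500 --learning_rate=0.001,0.0001 \
#       --train_dir=/tmp/speech_commands_train --summaries_dir=/tmp/retrain_logs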
|
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import time
import logging
from oslo_config import cfg
from networking_vsphere.agent import ovsvapp_agent
from networking_vsphere.common import constants as ovsvapp_const
from networking_vsphere.common import error
from networking_vsphere.tests import base
from networking_vsphere.tests.unit.drivers import fake_manager
from networking_vsphere.utils import resource_util
from neutron.agent.common import ovs_lib
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent as ovs_agent # noqa
from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager
NETWORK_ID = 'fake_net_id'
VNIC_ADDED = 'VNIC_ADDED'
FAKE_DEVICE_ID = 'fake_device_id'
FAKE_VM = 'fake_vm'
FAKE_HOST_1 = 'fake_host_1'
FAKE_HOST_2 = 'fake_host_2'
FAKE_CLUSTER_MOID = 'fake_cluster_moid'
FAKE_CLUSTER_1 = 'fake_cluster_1'
FAKE_CLUSTER_2 = 'fake_cluster_2'
FAKE_VCENTER = 'fake_vcenter'
FAKE_PORT_1 = 'fake_port_1'
FAKE_PORT_2 = 'fake_port_2'
FAKE_PORT_3 = 'fake_port_3'
FAKE_PORT_4 = 'fake_port_4'
MAC_ADDRESS = '01:02:03:04:05:06'
FAKE_CONTEXT = 'fake_context'
FAKE_SG = {'fake_sg': 'fake_sg_rule'}
FAKE_SG_RULE = {'security_group_source_groups': ['fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'security_group_id': 'fake_id'
}],
'sg_provider_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'source_port_range_min': 67,
'source_port_range_max': 67,
'port_range_min': 68,
'port_range_max': 68
}]
}
FAKE_SG_RULES = {FAKE_PORT_1: FAKE_SG_RULE}
FAKE_SG_RULES_MULTI_PORTS = {FAKE_PORT_1: FAKE_SG_RULE,
FAKE_PORT_2: FAKE_SG_RULE
}
FAKE_SG_RULES_MISSING = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress'
}]
}
}
FAKE_SG_RULES_PARTIAL = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'port_range_min': 22,
'port_range_max': 22
}]
}
}
DEVICE = {'id': FAKE_DEVICE_ID,
'cluster_id': FAKE_CLUSTER_1,
'host': FAKE_HOST_1,
'vcenter': FAKE_VCENTER}
class SampleEvent(object):
def __init__(self, type, host, cluster, srcobj, host_changed=False):
self.event_type = type
self.host_name = host
self.cluster_id = cluster
self.src_obj = srcobj
self.host_changed = host_changed
class VM(object):
def __init__(self, uuid, vnics):
self.uuid = uuid
self.vnics = vnics
class SamplePort(object):
def __init__(self, port_uuid, mac_address=None, pg_id=None):
self.port_uuid = port_uuid
self.mac_address = mac_address
self.pg_id = pg_id
class SamplePortUIDMac(object):
def __init__(self, port_uuid, mac_address):
self.port_uuid = port_uuid
self.mac_address = mac_address
class TestOVSvAppAgentRestart(base.TestCase):
@mock.patch('neutron.common.config.init')
@mock.patch('neutron.common.config.setup_logging')
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.RpcPluginApi')
@mock.patch('neutron.agent.securitygroups_rpc.SecurityGroupServerRpcApi')
@mock.patch('neutron.agent.rpc.PluginReportStateAPI')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.OVSvAppPluginApi')
@mock.patch('neutron.context.get_admin_context_without_session')
@mock.patch('neutron.agent.rpc.create_consumers')
@mock.patch('neutron.plugins.ml2.drivers.openvswitch.agent.'
'ovs_neutron_agent.OVSNeutronAgent.setup_integration_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_ovs_bridges')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_security_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent._init_ovs_flows')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.'
'OVSFirewallDriver.setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.OVSvAppAgent.__init__')
def setUp(self, mock_ovs_init, mock_get_port_ofport,
mock_set_secure_mode, mock_create_ovs_bridge,
mock_setup_base_flows, mock_check_ovs_firewall_restart,
mock_init_ovs_flows, mock_setup_security_br,
mock_setup_ovs_bridges,
mock_setup_integration_br, mock_create_consumers,
mock_get_admin_context_without_session, mock_ovsvapp_pluginapi,
mock_plugin_report_stateapi, mock_securitygroup_server_rpcapi,
mock_rpc_pluginapi, mock_ovsdb_api, mock_setup_logging,
mock_init):
super(TestOVSvAppAgentRestart, self).setUp()
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_get_port_ofport.return_value = 5
mock_ovs_init.return_value = None
self.agent = ovsvapp_agent.OVSvAppAgent()
self.agent.run_refresh_firewall_loop = False
self.LOG = ovsvapp_agent.LOG
self.agent.monitor_log = logging.getLogger('monitor')
def test_check_ovsvapp_agent_restart(self):
self.agent.int_br = mock.Mock()
with mock.patch.object(self.agent.int_br, 'bridge_exists',
return_value=True) as mock_br_exists, \
mock.patch.object(self.agent.int_br, 'dump_flows_for_table',
return_value='') as mock_dump_flows:
self.assertFalse(self.agent.check_ovsvapp_agent_restart())
self.assertTrue(mock_br_exists.called)
self.assertTrue(mock_dump_flows.called)
mock_dump_flows.return_value = 'cookie = 0x0'
self.assertTrue(self.agent.check_ovsvapp_agent_restart())
self.assertTrue(mock_br_exists.called)
self.assertTrue(mock_dump_flows.called)
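# These unit tests are normally run through the networking-vsphere test runner;
# a rough local equivalent (tooling and path are assumptions):
#
#   python -m pytest networking_vsphere/tests/unit/agent/test_ovsvapp_agent.py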
class TestOVSvAppAgent(base.TestCase):
@mock.patch('neutron.common.config.init')
@mock.patch('neutron.common.config.setup_logging')
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.RpcPluginApi')
@mock.patch('neutron.agent.securitygroups_rpc.SecurityGroupServerRpcApi')
@mock.patch('neutron.agent.rpc.PluginReportStateAPI')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.OVSvAppPluginApi')
@mock.patch('neutron.context.get_admin_context_without_session')
@mock.patch('neutron.agent.rpc.create_consumers')
@mock.patch('neutron.plugins.ml2.drivers.openvswitch.agent.'
'ovs_neutron_agent.OVSNeutronAgent.setup_integration_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.check_ovsvapp_agent_restart')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_ovs_bridges')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent.setup_security_br')
@mock.patch('networking_vsphere.agent.ovsvapp_agent.'
'OVSvAppAgent._init_ovs_flows')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.'
'OVSFirewallDriver.setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
def setUp(self, mock_get_port_ofport,
mock_set_secure_mode, mock_create_ovs_bridge,
mock_setup_base_flows, mock_check_ovs_firewall_restart,
mock_init_ovs_flows, mock_setup_security_br,
mock_setup_ovs_bridges, mock_check_ovsvapp_agent_restart,
mock_setup_integration_br, mock_create_consumers,
mock_get_admin_context_without_session, mock_ovsvapp_pluginapi,
mock_plugin_report_stateapi, mock_securitygroup_server_rpcapi,
mock_rpc_pluginapi, mock_ovsdb_api, mock_setup_logging,
mock_init):
super(TestOVSvAppAgent, self).setUp()
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_check_ovsvapp_agent_restart.return_value = False
mock_get_port_ofport.return_value = 5
self.agent = ovsvapp_agent.OVSvAppAgent()
self.agent.run_refresh_firewall_loop = False
self.LOG = ovsvapp_agent.LOG
self.agent.monitor_log = logging.getLogger('monitor')
def _build_port(self, port):
port = {'admin_state_up': False,
'id': port,
'device': DEVICE,
'network_id': NETWORK_ID,
'physical_network': 'physnet1',
'segmentation_id': '1001',
'lvid': 1,
'network_type': 'vlan',
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None',
'security_groups': FAKE_SG,
'mac_address': MAC_ADDRESS,
'device_id': FAKE_DEVICE_ID
}
return port
def _build_update_port(self, port):
port = {'admin_state_up': False,
'id': port,
'network_id': NETWORK_ID,
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'device_owner': 'compute:None',
'security_groups': FAKE_SG,
'mac_address': MAC_ADDRESS,
'device_id': FAKE_DEVICE_ID
}
return port
def test_setup_security_br_none(self):
cfg.CONF.set_override('security_bridge_mapping',
None, 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.LOG, 'warning') as mock_logger_warn,\
mock.patch.object(self.agent.sec_br, 'bridge_exists'
) as mock_ovs_bridge:
self.assertRaises(SystemExit,
self.agent.setup_security_br)
self.assertTrue(mock_logger_warn.called)
self.assertFalse(mock_ovs_bridge.called)
def test_setup_security_br(self):
cfg.CONF.set_override('security_bridge_mapping',
"br-fake:fake_if", 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
self.agent.int_br = mock.Mock()
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(ovs_lib, "OVSBridge") as mock_ovs_br, \
mock.patch.object(self.agent.sec_br,
"add_patch_port",
return_value=5), \
mock.patch.object(self.agent.int_br,
"add_patch_port",
return_value=6):
self.agent.setup_security_br()
self.assertTrue(mock_ovs_br.called)
self.assertTrue(self.agent.sec_br.add_patch_port.called)
self.assertTrue(mock_logger_info.called)
def test_recover_security_br_none(self):
cfg.CONF.set_override('security_bridge_mapping',
None, 'SECURITYGROUP')
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.LOG, 'warning') as mock_logger_warn, \
mock.patch.object(self.agent.sec_br, 'bridge_exists'
) as mock_ovs_bridge:
self.assertRaises(SystemExit,
self.agent.recover_security_br)
self.assertTrue(mock_logger_warn.called)
self.assertFalse(mock_ovs_bridge.called)
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge')
def test_recover_security_br(self, mock_ovs_bridge):
cfg.CONF.set_override('security_bridge_mapping',
"br-sec:physnet1", 'SECURITYGROUP')
self.agent.int_br = mock.Mock()
self.agent.sec_br = mock.Mock()
mock_br = mock_ovs_bridge.return_value
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(mock_br, 'bridge_exists'), \
mock.patch.object(mock_br, 'add_patch_port') as mock_add_patch_port, \
mock.patch.object(self.agent.int_br,
"get_port_ofport",
return_value=6), \
mock.patch.object(mock_br,
"get_port_ofport",
return_value=6), \
mock.patch.object(mock_br,
"delete_port") as mock_delete_port:
mock_br.get_bridge_for_iface.return_value = 'br-sec'
self.agent.recover_security_br()
self.assertTrue(mock_logger_info.called)
self.assertFalse(mock_delete_port.called)
self.assertFalse(mock_add_patch_port.called)
mock_br.get_bridge_for_iface.return_value = 'br-fake'
self.agent.recover_security_br()
self.assertTrue(mock_logger_info.called)
self.assertTrue(mock_delete_port.called)
self.assertTrue(mock_add_patch_port.called)
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
def test_recover_physical_bridges(self, mock_ovsdb_api):
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
self.agent.bridge_mappings = n_utils.parse_mappings(
cfg.CONF.OVSVAPP.bridge_mappings)
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.LOG, 'error') as mock_logger_error, \
mock.patch.object(self.agent, "br_phys_cls") as mock_ovs_br, \
mock.patch.object(ovs_lib.BaseOVS,
"get_bridges",
return_value=['br-eth1']
), \
mock.patch.object(p_utils, 'get_interface_name'
) as mock_int_name, \
mock.patch.object(self.agent.int_br,
"get_port_ofport",
return_value=6) as mock_get_ofport:
self.agent.recover_physical_bridges(self.agent.bridge_mappings)
self.assertTrue(mock_logger_info.called)
self.assertFalse(mock_logger_error.called)
self.assertTrue(mock_ovs_br.called)
self.assertTrue(mock_get_ofport.called)
self.assertTrue(mock_int_name.called)
self.assertEqual(self.agent.int_ofports['physnet1'], 6)
def test_init_ovs_flows(self):
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
self.agent.bridge_mappings = n_utils.parse_mappings(
cfg.CONF.OVSVAPP.bridge_mappings)
self.agent.patch_sec_ofport = 5
self.agent.int_ofports = {'physnet1': 'br-eth1'}
self.agent.phys_ofports = {"physnet1": "br-eth1"}
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.br = mock.Mock()
with mock.patch.object(self.agent.int_br,
"delete_flows"
) as mock_int_br_delete_flows, \
mock.patch.object(self.agent,
"br_phys_cls") as mock_ovs_br, \
mock.patch.object(self.agent.int_br,
"add_flow") as mock_int_br_add_flow:
self.agent._init_ovs_flows(self.agent.bridge_mappings)
self.assertTrue(mock_int_br_delete_flows.called)
self.assertTrue(mock_ovs_br.called)
self.assertTrue(br.delete_flows.called)
self.assertTrue(br.add_flows.called)
self.assertTrue(mock_int_br_add_flow.called)
def test_update_port_bindings(self):
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
return_value=set(["fake_port"])
) as mock_update_ports_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._update_port_bindings()
self.assertTrue(mock_update_ports_binding.called)
self.assertFalse(self.agent.ports_to_bind)
self.assertFalse(mock_log_exception.called)
def test_update_port_bindings_rpc_exception(self):
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
side_effect=Exception()
) as mock_update_port_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._update_port_bindings)
self.assertTrue(mock_update_port_binding.called)
self.assertTrue(mock_log_exception.called)
self.assertEqual(set(['fake_port']),
self.agent.ports_to_bind)
def test_update_port_bindings_partial(self):
self.agent.ports_to_bind.add("fake_port1")
self.agent.ports_to_bind.add("fake_port2")
self.agent.ports_to_bind.add("fake_port3")
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_ports_binding",
return_value=set(["fake_port1",
"fake_port2"])
) as mock_update_port_binding, \
mock.patch.object(self.LOG, 'exception'):
self.agent._update_port_bindings()
self.assertTrue(mock_update_port_binding.called)
self.assertEqual(set(["fake_port3"]),
self.agent.ports_to_bind)
def test_setup_ovs_bridges_vlan(self):
cfg.CONF.set_override('tenant_network_types',
"vlan", 'OVSVAPP')
cfg.CONF.set_override('bridge_mappings',
["physnet1:br-eth1"], 'OVSVAPP')
with mock.patch.object(self.agent, 'setup_physical_bridges'
) as mock_phys_brs, \
mock.patch.object(self.agent, '_init_ovs_flows'
) as mock_init_ovs_flows:
self.agent.setup_ovs_bridges()
mock_phys_brs.assert_called_with(self.agent.bridge_mappings)
mock_init_ovs_flows.assert_called_with(self.agent.bridge_mappings)
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
def test_setup_ovs_bridges_vxlan(self, mock_ovsdb_api):
self.agent.local_ip = "10.10.10.10"
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent, 'setup_tunnel_br'
) as mock_setup_tunnel_br, \
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows:
self.agent.setup_ovs_bridges()
mock_setup_tunnel_br.assert_called_with("br-tun")
self.assertTrue(mock_setup_tunnel_br_flows.called)
def test_setup_ovs_bridges_vxlan_ofport(self):
cfg.CONF.set_override('tenant_network_types',
"vxlan", 'OVSVAPP')
cfg.CONF.set_override('local_ip',
"10.10.10.10", 'OVSVAPP')
cfg.CONF.set_override('tunnel_bridge',
"br-tun", 'OVSVAPP')
self.agent.tun_br = mock.Mock()
self.agent.int_br = mock.Mock()
self.agent.local_ip = "10.10.10.10"
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.tun_br,
"add_patch_port",
return_value=5), \
mock.patch.object(self.agent.int_br,
"add_patch_port",
return_value=6), \
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows:
self.agent.setup_ovs_bridges()
self.assertTrue(self.agent.tun_br.add_patch_port.called)
self.assertEqual(self.agent.patch_tun_ofport, 6)
self.assertEqual(self.agent.patch_int_ofport, 5)
self.assertTrue(mock_setup_tunnel_br_flows.called)
def test_mitigate_ovs_restart_vlan(self):
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set(['1111'])
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br"
) as mock_int_br, \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_security_br"
) as mock_sec_br, \
mock.patch.object(self.agent.sg_agent, "init_firewall"
) as mock_init_fw, \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.agent, "_init_ovs_flows"
) as mock_init_flows, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_int_br.called)
self.assertTrue(mock_phys_brs.called)
self.assertTrue(mock_sec_br.called)
self.assertFalse(mock_setup_tunnel_br.called)
self.assertFalse(mock_setup_tunnel_br_flows.called)
self.assertTrue(mock_init_fw.called)
self.assertTrue(mock_init_flows.called)
self.assertTrue(self.agent.refresh_firewall_required)
self.assertEqual(2, len(self.agent.devices_to_filter))
monitor_warning.assert_called_with("ovs: broken")
monitor_info.assert_called_with("ovs: ok")
self.assertTrue(mock_logger_info.called)
def test_mitigate_ovs_restart_vxlan(self):
self.agent.enable_tunneling = True
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set(['1111'])
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, 'info') as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br"), \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_security_br"), \
mock.patch.object(self.agent.sg_agent, "init_firewall"
), \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.agent, "tunnel_sync"
) as mock_tun_sync, \
mock.patch.object(self.agent, "_init_ovs_flows"), \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_setup_tunnel_br.called)
self.assertTrue(mock_setup_tunnel_br_flows.called)
self.assertFalse(mock_phys_brs.called)
self.assertTrue(mock_tun_sync.called)
self.assertTrue(self.agent.refresh_firewall_required)
self.assertEqual(len(self.agent.devices_to_filter), 2)
monitor_warning.assert_called_with("ovs: broken")
monitor_info.assert_called_with("ovs: ok")
self.assertTrue(mock_logger_info.called)
def test_mitigate_ovs_restart_exception(self):
self.agent.enable_tunneling = False
self.agent.refresh_firewall_required = False
self.agent.devices_to_filter = set()
self.agent.cluster_host_ports = set(['1111'])
self.agent.cluster_other_ports = set(['2222'])
with mock.patch.object(self.LOG, "info") as mock_logger_info, \
mock.patch.object(self.agent, "setup_integration_br",
side_effect=Exception()) as mock_int_br, \
mock.patch.object(self.agent, "setup_physical_bridges"
) as mock_phys_brs, \
mock.patch.object(self.agent, "setup_tunnel_br"
) as mock_setup_tunnel_br,\
mock.patch.object(self.agent, 'setup_tunnel_br_flows'
) as mock_setup_tunnel_br_flows, \
mock.patch.object(self.LOG, "exception"
) as mock_exception_log, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent.mitigate_ovs_restart()
self.assertTrue(mock_int_br.called)
self.assertFalse(mock_phys_brs.called)
self.assertFalse(mock_setup_tunnel_br.called)
self.assertFalse(mock_setup_tunnel_br_flows.called)
self.assertFalse(mock_logger_info.called)
self.assertTrue(mock_exception_log.called)
self.assertFalse(self.agent.refresh_firewall_required)
self.assertEqual(0, len(self.agent.devices_to_filter))
monitor_warning.assert_called_with("ovs: broken")
self.assertFalse(monitor_info.called)
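    # Helper: returns a minimal VLAN port dict (fixed IP, security groups,
    # lvid, physical_network) keyed by the given port id; the _process_port
    # and firewall tests below feed it straight into the agent.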
def _get_fake_port(self, port_id):
return {'id': port_id,
'port_id': port_id,
'mac_address': MAC_ADDRESS,
'fixed_ips': [{'subnet_id': 'subnet_uuid',
'ip_address': '1.1.1.1'}],
'security_groups': FAKE_SG,
'segmentation_id': 1232,
'lvid': 1,
'network_id': 'fake_network',
'device_id': FAKE_DEVICE_ID,
'admin_state_up': True,
'physical_network': 'physnet1',
'network_type': 'vlan'}
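    # Helper: registers a mock physical bridge plus a fake eth_ofport for the
    # port's physical_network in agent.phys_brs and returns the mock. Note it
    # also exercises add_flows/delete_flows on the mock while wiring it up.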
def _build_phys_brs(self, port):
phys_net = port['physical_network']
self.agent.phys_brs[phys_net] = {}
self.agent.phys_brs[phys_net]['eth_ofport'] = 5
br = self.agent.phys_brs[phys_net]['br'] = mock.Mock()
br.add_flows(port['segmentation_id'],
port['mac_address'],
5)
br.delete_flows(port['mac_address'],
port['segmentation_id'])
return br
def test_process_port(self):
fakeport = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
br = self._build_phys_brs(fakeport)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan:
status = self.agent._process_port(fakeport)
self.assertIn(FAKE_PORT_1, self.agent.ports_dict)
self.assertTrue(status)
mock_add_devices.assert_called_with([fakeport])
mock_prov_local_vlan.assert_called_with(fakeport)
self.assertTrue(br.add_flows.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
def test_process_port_existing_network(self):
fakeport = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
br = self._build_phys_brs(fakeport)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = {}
self._build_lvm(fakeport)
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan:
status = self.agent._process_port(fakeport)
self.assertIn(FAKE_PORT_1, self.agent.ports_dict)
self.assertTrue(status)
mock_add_devices.assert_called_with([fakeport])
self.assertFalse(mock_prov_local_vlan.called)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_with_few_devices(self):
devices = set(['123', '234', '345', '456', '567', '678',
'1123', '1234', '1345', '1456', '1567', '1678'])
with mock.patch('eventlet.GreenPool.spawn_n') as mock_spawn_thread, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices(devices)
self.assertTrue(mock_spawn_thread.called)
self.assertEqual(1, mock_spawn_thread.call_count)
self.assertFalse(mock_log_exception.called)
def test_process_uncached_devices_with_more_devices(self):
devices = set(['123', '234', '345', '456', '567', '678',
'1123', '1234', '1345', '1456', '1567', '1678',
'2123', '2234', '2345', '2456', '2567', '2678',
'3123', '3234', '3345', '3456', '3567', '3678',
'4123', '4234', '4345', '4456', '4567', '4678',
'5123', '5234', '5345', '5456', '5567', '5678',
'6123', '6234', '6345', '6456', '6567', '6678'])
with mock.patch('eventlet.GreenPool.spawn_n') as mock_spawn_thread, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices(devices)
self.assertTrue(mock_spawn_thread.called)
self.assertEqual(2, mock_spawn_thread.call_count)
self.assertFalse(mock_log_exception.called)
def test_process_uncached_devices_sublist_single_port_vlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
self.agent.ports_dict = {}
br = self._build_phys_brs(fakeport_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
devices = [FAKE_PORT_1]
self.agent.vlan_manager.mapping = {}
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
                                  ) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_provision_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(1, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_provision_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_sublist_multiple_port_vlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
br = self._build_phys_brs(fakeport_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
devices = [FAKE_PORT_1, FAKE_PORT_2]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
                                  ) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
self.assertTrue(br.add_flows.called)
def test_process_uncached_devices_sublist_single_port_vxlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_1["network_type"] = p_const.TYPE_VXLAN
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
devices = [FAKE_PORT_1]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
                                  ) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_populate_lvm'), \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertEqual(1, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
def test_process_uncached_devices_sublist_multiple_port_vxlan(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
fakeport_1["network_type"] = p_const.TYPE_VXLAN
fakeport_2["network_type"] = p_const.TYPE_VXLAN
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
devices = [FAKE_PORT_1, FAKE_PORT_2]
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
                                  ) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_populate_lvm'), \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
def test_process_uncached_devices_sublist_stale_vm_port(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
fakeport_3 = self._get_fake_port(FAKE_PORT_3)
self.agent.ports_dict = {}
self.agent.vlan_manager.mapping = {}
self._build_phys_brs(fakeport_1)
self._build_phys_brs(fakeport_2)
self._build_phys_brs(fakeport_3)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.cluster_host_ports.add(FAKE_PORT_2)
self.agent.ports_to_bind = set([FAKE_PORT_3, FAKE_PORT_4])
self.agent.vnic_info[FAKE_PORT_1] = fakeport_1
self.agent.vnic_info[FAKE_PORT_2] = fakeport_2
self.agent.vnic_info[FAKE_PORT_3] = fakeport_3
devices = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
self.agent.sg_agent.remove_devices_filter = mock.Mock()
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_to_filter, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
                                  ) as mock_refresh_firewall, \
mock.patch.object(self.agent.sg_agent,
'remove_devices_filter'
                                  ) as mock_remove_device_filter, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent, '_remove_stale_ports_flows'), \
mock.patch.object(self.agent, '_block_stale_ports'), \
mock.patch.object(self.LOG, 'exception') as mock_log_exception:
self.agent._process_uncached_devices_sublist(devices)
self.assertTrue(mock_get_ports_details_list.called)
self.assertEqual(2, mock_add_devices_to_filter.call_count)
self.assertTrue(mock_refresh_firewall.called)
self.assertTrue(mock_prov_local_vlan.called)
self.assertFalse(mock_log_exception.called)
self.assertNotIn(FAKE_PORT_3, self.agent.ports_to_bind)
self.assertIn(FAKE_PORT_4, self.agent.ports_to_bind)
self.assertNotIn(FAKE_PORT_1, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_2, self.agent.vnic_info)
self.assertNotIn(FAKE_PORT_3, self.agent.vnic_info)
mock_remove_device_filter.assert_called_with(FAKE_PORT_3)
def test_update_firewall(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
fakeport_2 = self._get_fake_port(FAKE_PORT_2)
self._build_phys_brs(fakeport_1)
self._build_phys_brs(fakeport_2)
self.agent.devices_to_filter = set([FAKE_PORT_1,
FAKE_PORT_2])
self.agent.ports_dict = {FAKE_PORT_1: fakeport_1}
self.agent.vnic_info[FAKE_PORT_1] = {}
self.agent.vnic_info[FAKE_PORT_2] = {}
self.agent.refresh_firewall_required = True
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
return_value=[fakeport_1, fakeport_2]
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
) as mock_refresh_firewall, \
mock.patch.object(self.agent, '_provision_local_vlan'
), \
mock.patch.object(self.agent, '_remove_stale_ports_flows'), \
mock.patch.object(self.agent, '_block_stale_ports'), \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent._update_firewall()
self.assertFalse(self.agent.refresh_firewall_required)
self.assertFalse(self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_2, self.agent.ports_dict)
mock_get_ports_details_list.assert_called_with(
self.agent.context,
[FAKE_PORT_2],
self.agent.agent_id,
self.agent.vcenter_id,
self.agent.cluster_id)
mock_refresh_firewall.assert_called_with(set([FAKE_PORT_1,
FAKE_PORT_2]))
self.assertEqual(2, monitor_warning.call_count)
self.assertEqual(2, monitor_info.call_count)
def test_update_firewall_get_ports_exception(self):
fakeport_1 = self._get_fake_port(FAKE_PORT_1)
self.agent.devices_to_filter = set([FAKE_PORT_1,
FAKE_PORT_2])
self.agent.ports_dict = {FAKE_PORT_1: fakeport_1}
self.agent.refresh_firewall_required = True
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
with mock.patch.object(self.agent.ovsvapp_rpc,
'get_ports_details_list',
side_effect=Exception()
) as mock_get_ports_details_list, \
mock.patch.object(self.agent.sg_agent, 'refresh_firewall'
) as mock_refresh_firewall, \
mock.patch.object(self.agent.monitor_log, "warning"
) as monitor_warning, \
mock.patch.object(self.agent.monitor_log, "info"
) as monitor_info:
self.agent._update_firewall()
self.assertTrue(self.agent.refresh_firewall_required)
self.assertEqual(set([FAKE_PORT_2]), self.agent.devices_to_filter)
self.assertNotIn(FAKE_PORT_2, self.agent.ports_dict)
mock_get_ports_details_list.assert_called_with(
self.agent.context,
[FAKE_PORT_2],
self.agent.agent_id,
self.agent.vcenter_id,
self.agent.cluster_id)
mock_refresh_firewall.assert_called_with(set([FAKE_PORT_1]))
self.assertEqual(2, monitor_warning.call_count)
self.assertEqual(1, monitor_info.call_count)
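    # The _check_for_updates tests below drive the agent's polling loop with
    # stubbed check_ovs_status values; going by the test names, 4 stands for a
    # healthy OVS, 0 for a restarted OVS and 2 for a dead OVS.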
def test_check_for_updates_no_updates(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent.sg_agent, 'refresh_port_filters'
) as mock_refresh_port_filters, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_refresh_port_filters.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_ovs_restarted(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=0) as mock_check_ovs, \
mock.patch.object(self.agent, 'mitigate_ovs_restart'
) as mock_mitigate, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertTrue(mock_mitigate.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
@mock.patch.object(ovsvapp_agent.OVSvAppAgent, 'check_ovs_status')
def test_check_for_updates_ovs_dead(self, check_ovs_status):
check_ovs_status.return_value = 2
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'mitigate_ovs_restart'
) as mock_mitigate, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(self.agent.ovsvapp_mitigation_required)
self.assertTrue(check_ovs_status.called)
self.assertFalse(mock_mitigate.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
check_ovs_status.return_value = 1
self.agent._check_for_updates()
self.assertTrue(check_ovs_status.called)
self.assertTrue(mock_mitigate.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
self.assertFalse(self.agent.ovsvapp_mitigation_required)
def test_check_for_updates_devices_to_filter(self):
self.agent.refresh_firewall_required = True
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, 'mitigate_ovs_restart'
) as mock_mitigate, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall,\
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_mitigate.called)
self.assertTrue(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_firewall_refresh(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind = None
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=True
) as mock_firewall_refresh,\
mock.patch.object(self.agent.sg_agent, 'refresh_port_filters'
) as mock_refresh_port_filters, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertTrue(mock_refresh_port_filters.called)
self.assertFalse(mock_update_port_bindings.called)
def test_check_for_updates_port_bindings(self):
self.agent.refresh_firewall_required = False
self.agent.ports_to_bind.add("fake_port")
with mock.patch.object(self.agent, 'check_ovs_status',
return_value=4) as mock_check_ovs, \
mock.patch.object(self.agent, '_update_firewall'
) as mock_update_firewall, \
mock.patch.object(self.agent.sg_agent,
'firewall_refresh_needed',
return_value=False
) as mock_firewall_refresh, \
mock.patch.object(self.agent, '_update_port_bindings'
) as mock_update_port_bindings:
self.agent._check_for_updates()
self.assertTrue(mock_check_ovs.called)
self.assertFalse(mock_update_firewall.called)
self.assertTrue(mock_firewall_refresh.called)
self.assertTrue(mock_update_port_bindings.called)
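    # The _update_devices_up/_update_devices_down tests below stub the bulk
    # ovsvapp_rpc status calls and verify that only the devices reported as
    # failed remain queued for the next run.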
def test_update_devices_up(self):
self.agent.devices_up_list.append(FAKE_PORT_1)
ret_value = {'devices_up': [FAKE_PORT_1],
'failed_devices_up': []}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
return_value=ret_value
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertFalse(self.agent.devices_up_list)
self.assertFalse(log_exception.called)
def test_update_devices_up_rpc_exception(self):
self.agent.devices_up_list.append(FAKE_PORT_1)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
side_effect=Exception()
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertTrue(log_exception.called)
def test_update_devices_up_partial(self):
self.agent.devices_up_list = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
ret_value = {'devices_up': [FAKE_PORT_1, FAKE_PORT_2],
'failed_devices_up': [FAKE_PORT_3]}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_up",
return_value=ret_value
) as update_devices_up, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_up()
self.assertTrue(update_devices_up.called)
self.assertEqual([FAKE_PORT_3], self.agent.devices_up_list)
self.assertFalse(log_exception.called)
def test_update_devices_down(self):
self.agent.devices_down_list.append(FAKE_PORT_1)
ret_value = {'devices_down': [FAKE_PORT_1],
'failed_devices_down': []}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
return_value=ret_value
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertFalse(self.agent.devices_down_list)
self.assertFalse(log_exception.called)
def test_update_devices_down_rpc_exception(self):
self.agent.devices_down_list.append(FAKE_PORT_1)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
side_effect=Exception()
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertEqual([FAKE_PORT_1], self.agent.devices_down_list)
self.assertTrue(log_exception.called)
def test_update_devices_down_partial(self):
self.agent.devices_down_list = [FAKE_PORT_1, FAKE_PORT_2, FAKE_PORT_3]
ret_value = {'devices_down': [FAKE_PORT_1, FAKE_PORT_2],
'failed_devices_down': [FAKE_PORT_3]}
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_devices_down",
return_value=ret_value
) as update_devices_down, \
mock.patch.object(self.LOG, 'exception'
) as log_exception:
self.agent._update_devices_down()
self.assertTrue(update_devices_down.called)
self.assertEqual([FAKE_PORT_3], self.agent.devices_down_list)
self.assertFalse(log_exception.called)
def test_report_state(self):
with mock.patch.object(self.agent.state_rpc,
"report_state") as report_st:
self.agent._report_state()
report_st.assert_called_with(self.agent.context,
self.agent.agent_state,
True)
self.assertNotIn("start_flag", self.agent.agent_state)
self.assertFalse(self.agent.use_call)
self.assertEqual(cfg.CONF.host,
self.agent.agent_state["host"])
def test_report_state_fail(self):
with mock.patch.object(self.agent.state_rpc,
"report_state",
side_effect=Exception()) as mock_report_st, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._report_state()
mock_report_st.assert_called_with(self.agent.context,
self.agent.agent_state,
True)
self.assertTrue(mock_log_exception.called)
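    # The process_event tests below push fake vCenter-style events
    # (SampleEvent carrying a VM fake and its SamplePort vnics) through
    # agent.process_event and verify how cluster_host_ports,
    # cluster_other_ports and devices_to_filter are updated.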
def test_process_event_ignore_event(self):
vm = VM(FAKE_VM, [])
event = SampleEvent(VNIC_ADDED, FAKE_HOST_1,
FAKE_CLUSTER_MOID, vm)
with mock.patch.object(self.agent,
"_notify_device_added") as mock_add_vm, \
mock.patch.object(self.agent,
"_notify_device_updated") as mock_update_vm, \
mock.patch.object(self.agent,
"_notify_device_deleted") as mock_del_vm, \
mock.patch.object(self.LOG, 'debug') as mock_log_debug:
self.agent.process_event(event)
self.assertFalse(mock_add_vm.called)
self.assertFalse(mock_update_vm.called)
self.assertFalse(mock_del_vm.called)
self.assertTrue(mock_log_debug.called)
def test_process_event_exception(self):
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
with mock.patch.object(self.agent,
"_notify_device_added",
side_effect=Exception()) as mock_add_vm, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(self.LOG, 'error') as mock_log_error:
self.agent.process_event(event)
self.assertTrue(mock_add_vm.called)
self.assertTrue(mock_log_error.called)
self.assertTrue(mock_log_exception.called)
def test_process_event_vm_create_nonics_non_host_non_cluster(self):
self.agent.esx_hostname = FAKE_HOST_2
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent,
"_notify_device_added") as device_added:
self.agent.process_event(event)
self.assertTrue(device_added.called)
def test_process_event_vm_create_nonics_non_host(self):
self.agent.esx_hostname = FAKE_HOST_2
vm = VM(FAKE_VM, [])
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent,
"_notify_device_added") as device_added:
self.agent.process_event(event)
self.assertTrue(device_added.called)
self.assertEqual(FAKE_CLUSTER_MOID, self.agent.cluster_moid)
def test_process_event_vm_create_nics_non_host(self):
self.agent.esx_hostname = FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='mock_flow') as mock_dump_flows:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_other_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
def test_process_event_vm_create_nics_host(self):
self.agent.esx_hostname = FAKE_HOST_1
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
event = SampleEvent(ovsvapp_const.VM_CREATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.sec_br = mock.Mock()
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='mock_flow') as mock_dump_flows:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
for vnic in vm.vnics:
self.assertIn(vnic.port_uuid, self.agent.devices_to_filter)
self.assertIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertNotIn(vnic.port_uuid, self.agent.cluster_other_ports)
with mock.patch.object(self.agent.sec_br, 'dump_flows_for',
return_value='') as mock_dump_flows, \
mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=True) as mock_get_ports:
self.agent.process_event(event)
self.assertTrue(mock_dump_flows.called)
self.assertTrue(mock_get_ports.called)
def test_process_event_vm_updated_nonhost(self):
self.agent.esx_hostname = FAKE_HOST_2
vm_port1 = SamplePort(FAKE_PORT_1)
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm = VM(FAKE_VM, [vm_port1])
event = SampleEvent(ovsvapp_const.VM_UPDATED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm, True)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.process_event(event)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
def test_process_event_vm_delete_hosted_vm_vlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self._build_lvm(port)
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
) as mock_post_del_vm, \
mock.patch.object(self.LOG, 'debug'), \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
self.assertTrue(br.delete_flows.called)
def test_process_event_vm_delete_hosted_vm_vxlan(self):
self.agent.esx_hostname = FAKE_HOST_1
self.agent.cluster_host_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
                               return_value=True) as post_del_vm:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid, self.agent.cluster_host_ports)
self.assertTrue(post_del_vm.called)
def test_process_event_vm_delete_non_hosted_vm(self):
self.agent.esx_hostname = FAKE_HOST_2
self.agent.cluster_other_ports.add(FAKE_PORT_1)
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
vm_port = SamplePortUIDMac(FAKE_PORT_1, MAC_ADDRESS)
vm = VM(FAKE_VM, ([vm_port]))
event = SampleEvent(ovsvapp_const.VM_DELETED,
FAKE_HOST_1, FAKE_CLUSTER_MOID, vm)
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.net_mgr.get_driver(),
"post_delete_vm",
return_value=True) as mock_post_del_vm, \
mock.patch.object(self.agent.net_mgr.get_driver(),
"delete_network") as mock_del_net:
self.agent.process_event(event)
for vnic in vm.vnics:
self.assertNotIn(vnic.port_uuid,
self.agent.cluster_other_ports)
self.assertTrue(mock_post_del_vm.called)
self.assertFalse(mock_del_net.called)
def test_notify_device_added_with_hosted_vm(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=True) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_added_rpc_exception(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
side_effect=Exception()) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
                                  ) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_added, vm, host)
self.assertTrue(mock_log_exception.called)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_time_sleep.called)
def test_notify_device_added_with_retry(self):
vm = VM(FAKE_VM, [])
host = FAKE_HOST_1
self.agent.esx_hostname = host
self.agent.state = ovsvapp_const.AGENT_RUNNING
with mock.patch.object(self.agent.ovsvapp_rpc,
"get_ports_for_device",
return_value=False) as mock_get_ports, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(time, "sleep") as mock_time_sleep:
self.agent._notify_device_added(vm, host)
self.assertTrue(mock_get_ports.called)
self.assertTrue(mock_time_sleep.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_migration_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent._add_ports_to_host_ports([FAKE_PORT_1])
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, FAKE_HOST_2, True)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_update_not_found(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
br = self.agent.phys_brs[port['physical_network']]['br']
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
):
self.agent._notify_device_updated(vm, host, True)
self.assertFalse(br.add_drop_flows.called)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
):
self.agent._notify_device_updated(vm, host, True)
self.assertTrue(br.add_drop_flows.called)
def test_notify_device_updated_host_vlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self._build_lvm(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
br = self.agent.phys_brs[port['physical_network']]['br']
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(br.add_flows.called)
def test_notify_device_updated_vlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
port = self._build_port(FAKE_PORT_1)
br = self._build_phys_brs(port)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(port)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(br.add_flows.called)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_notify_device_updated_host_vlan_multiple_nic(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm_port2 = SamplePort(FAKE_PORT_2)
vm = VM(FAKE_VM, ([vm_port1, vm_port2]))
port1 = self._build_port(FAKE_PORT_1)
port2 = self._build_port(FAKE_PORT_2)
br1 = self._build_phys_brs(port1)
br2 = self._build_phys_brs(port2)
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
self.agent.ports_dict[port2['id']] = self.agent._build_port_info(port2)
self._build_lvm(port1)
self._build_lvm(port2)
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
self.assertEqual(1, mock_update_device_binding.call_count)
self.assertTrue(br1.add_flows.called)
self.assertTrue(br2.add_flows.called)
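    # Helper: seeds the agent's vlan_manager with the port's network mapping,
    # swallowing MappingAlreadyExists so it can be invoked repeatedly.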
def _build_lvm(self, port):
try:
self.agent.vlan_manager.add(port['network_id'], port['lvid'],
port['network_type'],
port['physical_network'], '1234')
except vlanmanager.MappingAlreadyExists:
return None
def test_notify_device_updated_host_vxlan(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
port1 = self._build_port(FAKE_PORT_1)
port1['network_type'] = p_const.TYPE_VXLAN
self.agent.ports_dict[port1['id']] = self.agent._build_port_info(port1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding"
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.agent._notify_device_updated(vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertFalse(mock_log_exception.called)
def test_notify_device_updated_vxlan_rpc_exception(self):
host = FAKE_HOST_1
self.agent.esx_hostname = host
vm_port1 = SamplePort(FAKE_PORT_1)
vm = VM(FAKE_VM, [vm_port1])
self.agent.state = ovsvapp_const.AGENT_RUNNING
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
with mock.patch.object(self.agent.ovsvapp_rpc,
"update_device_binding",
side_effect=Exception()
) as mock_update_device_binding, \
mock.patch.object(self.LOG, 'exception'
) as mock_log_exception:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent._notify_device_updated, vm, host, True)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertTrue(mock_update_device_binding.called)
self.assertTrue(mock_log_exception.called)
def test_map_port_to_common_model_vlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
network, port = self.agent._map_port_to_common_model(expected_port)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
def test_map_port_to_common_model_vxlan(self):
expected_port = self._build_port(FAKE_PORT_1)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
network, port = self.agent._map_port_to_common_model(expected_port, 1)
expected_name = expected_port['network_id'] + "-" + FAKE_CLUSTER_MOID
self.assertEqual(expected_name, network.name)
self.assertEqual(expected_port['id'], port.uuid)
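    # The device_create tests below simulate the server-side fan-out of a new
    # device: ports arrive with sg_rules, and the agent either tracks them as
    # other-cluster ports, marks host-local ports up, or defers filtering when
    # the expanded security group rules are missing.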
def test_device_create_cluster_mismatch(self):
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_2
with mock.patch.object(self.agent,
'_process_create_ports',
return_value=True) as mock_create_ports, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE)
self.assertTrue(mock_logger_debug.called)
self.assertFalse(mock_create_ports.called)
def test_device_create_non_hosted_vm(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.esx_hostname = FAKE_HOST_2
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
mock_add_devices_fn.assert_called_with(ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(self.agent.devices_up_list)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.vlan_manager.mapping = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MISSING
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vlan_sg_rule_partial_missing(self):
ports = [self._build_port(FAKE_PORT_1)]
self._build_phys_brs(ports[0])
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.devices_to_filter = set()
self.agent.vlan_manager.mapping = {}
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_PARTIAL
) as mock_expand_sg_rules, \
mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_prov_local_vlan.called)
def test_device_create_hosted_vm_vxlan(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
ports = [port]
self.agent.vlan_manager.mapping = {}
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.vlan_manager.mapping = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent,
'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'
) as mock_update_device_up, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_prov_local_vlan.called)
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertNotIn(FAKE_PORT_1, self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_update_device_up.called)
def test_device_create_hosted_vm_vxlan_sg_rule_missing(self):
port = self._build_port(FAKE_PORT_1)
port['network_type'] = p_const.TYPE_VXLAN
ports = [port]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VXLAN]
self.agent.vlan_manager.mapping = {}
self.agent.devices_to_filter = set()
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
with mock.patch.object(self.agent, '_provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent,
'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MISSING
) as mock_expand_sg_rules, \
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'
) as mock_update_device_up, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_prov_local_vlan.called)
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.devices_to_filter)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
mock_add_devices_fn.assert_called_with(ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_update_device_up.called)
def test_device_create_hosted_vm_create_port_exception(self):
ports = [self._build_port(FAKE_PORT_1)]
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().create_port = mock.Mock(
side_effect=Exception())
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
), \
mock.patch.object(self.agent, '_provision_local_vlan'
), \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
) as mock_sg_update_fn, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES
) as mock_expand_sg_rules, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug, \
mock.patch.object(self.LOG, 'exception') as mock_log_excep:
self.assertRaises(
error.OVSvAppNeutronAgentError,
self.agent.device_create,
FAKE_CONTEXT, device=DEVICE,
ports=ports, sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertNotIn(FAKE_PORT_1, self.agent.cluster_other_ports)
self.assertIn(FAKE_PORT_1, self.agent.cluster_host_ports)
self.assertFalse(mock_sg_update_fn.called)
self.assertTrue(mock_expand_sg_rules.called)
self.assertTrue(mock_log_excep.called)
def test_port_update_admin_state_up(self):
port = self._build_port(FAKE_PORT_1)
self.agent.ports_dict[port['id']] = self.agent._build_port_info(
port)
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.cluster_host_ports = set([port['id']])
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
updated_port = self._build_update_port(FAKE_PORT_1)
updated_port['admin_state_up'] = True
        self.agent.devices_up_list = []
neutron_port = {'port': updated_port,
'segmentation_id': port['segmentation_id']}
with mock.patch.object(self.LOG, 'exception'
) as mock_log_exception, \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.port_update(FAKE_CONTEXT, **neutron_port)
self.assertEqual(neutron_port['port']['admin_state_up'],
self.agent.ports_dict[port['id']].
admin_state_up)
self.assertEqual([FAKE_PORT_1], self.agent.devices_up_list)
self.assertFalse(mock_log_exception.called)
self.assertTrue(mock_logger_debug.called)
def test_device_update_maintenance_mode(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff") as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertFalse(log_exception.called)
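    # With esx_maintenance_mode disabled, the agent is expected to shut the
    # host down rather than power off the VM and enter maintenance mode.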
def test_device_update_shutdown_mode(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = False
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff") as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertFalse(power_off.called)
self.assertFalse(maintenance_mode.called)
self.assertTrue(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertFalse(log_exception.called)
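    # If powering off the OVSvApp VM raises (e.g. it is already powered off),
    # the agent should log the exception but still move the host into
    # maintenance mode and release the cluster lock successfully.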
def test_device_update_ovsvapp_alreadly_powered_off(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff",
side_effect=Exception()) as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode"
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep'):
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=True)
self.assertTrue(log_exception.called)
def test_device_update_maintenance_mode_exception(self):
kwargs = {'device_data': {'ovsvapp_agent': 'fake_agent_host_1',
'esx_host_name': FAKE_HOST_1,
'assigned_agent_host': FAKE_HOST_2}}
self.agent.hostname = FAKE_HOST_2
self.agent.esx_maintenance_mode = True
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.net_mgr.get_driver().session = "fake_session"
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.vcenter_id = FAKE_VCENTER
with mock.patch.object(resource_util,
"get_vm_mor_by_name",
return_value="vm_mor") as vm_mor_by_name, \
mock.patch.object(resource_util,
"get_host_mor_by_name",
return_value="host_mor"
) as host_mor_by_name, \
mock.patch.object(resource_util,
"set_vm_poweroff",
side_effect=Exception()) as power_off, \
mock.patch.object(resource_util,
"set_host_into_maintenance_mode",
side_effect=Exception()
) as maintenance_mode, \
mock.patch.object(resource_util,
"set_host_into_shutdown_mode"
) as shutdown_mode, \
mock.patch.object(self.agent.ovsvapp_rpc,
"update_cluster_lock") as cluster_lock, \
mock.patch.object(self.LOG, 'exception') as log_exception, \
mock.patch.object(time, 'sleep') as time_sleep:
self.agent.device_update(FAKE_CONTEXT, **kwargs)
self.assertTrue(vm_mor_by_name.called)
self.assertTrue(host_mor_by_name.called)
self.assertTrue(power_off.called)
self.assertTrue(maintenance_mode.called)
self.assertFalse(shutdown_mode.called)
self.assertTrue(cluster_lock.called)
cluster_lock.assert_called_with(self.agent.context,
cluster_id=self.agent.cluster_id,
vcenter_id=self.agent.vcenter_id,
success=False)
self.assertTrue(log_exception.called)
self.assertTrue(time_sleep.called)
def test_enhanced_sg_provider_updated(self):
kwargs = {'network_id': NETWORK_ID}
with mock.patch.object(self.LOG, 'info') as log_info, \
mock.patch.object(self.agent.sg_agent, "sg_provider_updated"
) as mock_sg_provider_updated:
self.agent.enhanced_sg_provider_updated(FAKE_CONTEXT, **kwargs)
self.assertTrue(log_info.called)
mock_sg_provider_updated.assert_called_with(NETWORK_ID)
def test_device_create_hosted_vm_vlan_multiple_physnet(self):
port1 = self._build_port(FAKE_PORT_1)
port2 = self._build_port(FAKE_PORT_2)
port2['physical_network'] = "physnet2"
port2['segmentation_id'] = "2005"
port2['network_id'] = "fake_net2"
ports = [port1, port2]
self._build_phys_brs(port1)
self._build_phys_brs(port2)
self.agent.phys_ofports = {}
self.agent.phys_ofports[port1['physical_network']] = 4
self.agent.phys_ofports[port2['physical_network']] = 5
self.agent.vcenter_id = FAKE_VCENTER
self.agent.cluster_id = FAKE_CLUSTER_1
self.agent.cluster_moid = FAKE_CLUSTER_MOID
self.agent.esx_hostname = FAKE_HOST_1
self.agent.tenant_network_types = [p_const.TYPE_VLAN]
self.agent.devices_up_list = []
self.agent.net_mgr = fake_manager.MockNetworkManager("callback")
self.agent.net_mgr.initialize_driver()
self.agent.int_br = mock.Mock()
self.agent.vlan_manager.mapping = {}
self.agent.patch_sec_ofport = 1
self.agent.int_ofports = {'physnet1': 2, 'physnet2': 3}
with mock.patch.object(self.agent.sg_agent, 'add_devices_to_filter'
) as mock_add_devices_fn, \
mock.patch.object(self.agent.sg_agent, 'ovsvapp_sg_update'
), \
mock.patch.object(self.agent.int_br, 'provision_local_vlan'
) as mock_prov_local_vlan, \
mock.patch.object(self.agent.sg_agent, 'expand_sg_rules',
return_value=FAKE_SG_RULES_MULTI_PORTS
), \
mock.patch.object(self.LOG, 'debug') as mock_logger_debug:
self.agent.device_create(FAKE_CONTEXT,
device=DEVICE,
ports=ports,
sg_rules=mock.MagicMock())
self.assertTrue(mock_logger_debug.called)
self.assertEqual([FAKE_PORT_1, FAKE_PORT_2],
self.agent.devices_up_list)
mock_add_devices_fn.assert_called_with(ports)
self.assertTrue(mock_prov_local_vlan.called)
mock_prov_local_vlan.assert_any_call(
port1['network_type'],
port1['lvid'],
port1['segmentation_id'],
self.agent.patch_sec_ofport,
self.agent.int_ofports['physnet1'], None)
mock_prov_local_vlan.assert_any_call(
port2['network_type'],
port2['lvid'],
port2['segmentation_id'],
self.agent.patch_sec_ofport,
self.agent.int_ofports['physnet2'], None)
|
|
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest,
                          import_module, TESTFN, unlink, check_warnings,
                          captured_stdout, impl_detail,
                          skip_unless_symlink, change_cwd)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._set_uname(('',)*5)
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
self._added_envvars = []
self._changed_envvars = []
for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
if var in os.environ:
self._changed_envvars.append((var, os.environ[var]))
else:
self._added_envvars.append(var)
def tearDown(self):
sys.path[:] = self.sys_path
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = self._config_vars[0]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self._config_vars[1])
for var, value in self._changed_envvars:
os.environ[var] = value
for var in self._added_envvars:
os.environ.pop(var, None)
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = os.uname_result(uname)
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = sorted(wanted.items())
scheme = sorted(scheme.items())
self.assertEqual(scheme, wanted)
def test_get_path(self):
# XXX make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxsize = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxsize = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3' % arch)
self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user', 'pypy')
self.assertEqual(get_scheme_names(), wanted)
@skip_unless_symlink
def test_symlink(self):
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["PATH"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["PATH"])
# Issue 7880
def get(python):
cmd = [python, '-c',
'import sysconfig; print(sysconfig.get_platform())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
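        # For example, with the default schemes 'purelib' is
        # <base>/lib/pythonX.Y/site-packages under posix_prefix and
        # <userbase>/lib/pythonX.Y/site-packages under posix_user, so
        # substituting base with userbase should map one path onto the other
        # (illustrative layout; exact paths vary by platform and build).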
adapt = sys.base_prefix != sys.base_exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
base = base.replace(sys.exec_prefix, sys.base_prefix)
elif sys.base_prefix != sys.prefix:
# virtual environment? Likewise, we have to adapt the paths
# before comparing
global_path = global_path.replace(sys.base_prefix, sys.prefix)
base = base.replace(sys.base_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
def test_main(self):
# just making sure _main() runs and returns things in the stdout
with captured_stdout() as output:
_main()
self.assertTrue(len(output.getvalue().split('\n')) > 0)
@impl_detail("PyPy lacks LDFLAGS/LDSHARED config vars", pypy=False)
@unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
def test_ldshared_value(self):
ldflags = sysconfig.get_config_var('LDFLAGS')
ldshared = sysconfig.get_config_var('LDSHARED')
self.assertIn(ldflags, ldshared)
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
@impl_detail("Test is not PyPy compatible", pypy=False)
def test_srcdir(self):
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
self.assertTrue(os.path.isabs(srcdir), srcdir)
self.assertTrue(os.path.isdir(srcdir), srcdir)
if sysconfig._PYTHON_BUILD:
# The python executable has not been installed so srcdir
# should be a full source checkout.
Python_h = os.path.join(srcdir, 'Include', 'Python.h')
self.assertTrue(os.path.exists(Python_h), Python_h)
self.assertTrue(sysconfig._is_python_source_dir(srcdir))
elif os.name == 'posix':
makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
# Issue #19340: srcdir has been realpath'ed already
makefile_dir = os.path.realpath(makefile_dir)
self.assertEqual(makefile_dir, srcdir)
def test_srcdir_independent_of_cwd(self):
# srcdir should be independent of the current working directory
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
with change_cwd(os.pardir):
srcdir2 = sysconfig.get_config_var('srcdir')
self.assertEqual(srcdir, srcdir2)
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_deprecation(self):
self.assertWarns(DeprecationWarning,
sysconfig.get_config_var, 'SO')
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_value(self):
with check_warnings(('', DeprecationWarning)):
self.assertEqual(sysconfig.get_config_var('SO'),
sysconfig.get_config_var('EXT_SUFFIX'))
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_in_vars(self):
vars = sysconfig.get_config_vars()
self.assertIsNotNone(vars['SO'])
self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
@unittest.skipUnless(sys.platform == 'linux' and
hasattr(sys.implementation, '_multiarch'),
'multiarch-specific test')
def test_triplet_in_ext_suffix(self):
ctypes = import_module('ctypes')
import platform, re
machine = platform.machine()
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if re.match('(aarch64|arm|mips|ppc|powerpc|s390|sparc)', machine):
self.assertTrue('linux' in suffix, suffix)
if re.match('(i[3-6]86|x86_64)$', machine):
if ctypes.sizeof(ctypes.c_char_p()) == 4:
self.assertTrue(
suffix.endswith((
'i386-linux-gnu.so',
'i486-linux-gnu.so',
'i586-linux-gnu.so',
'i686-linux-gnu.so',
'x86_64-linux-gnux32.so',
)),
suffix,
)
else: # 8 byte pointer size
self.assertTrue(suffix.endswith('x86_64-linux-gnu.so'), suffix)
@unittest.skipUnless(sys.platform == 'darwin', 'OS X-specific test')
def test_osx_ext_suffix(self):
suffix = sysconfig.get_config_var('EXT_SUFFIX')
self.assertTrue(suffix.endswith('-darwin.so'), suffix)
class MakefileTests(unittest.TestCase):
@impl_detail("Test is not PyPy compatible", pypy=False)
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
def test_parse_makefile(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as makefile:
print("var1=a$(VAR2)", file=makefile)
print("VAR2=b$(var3)", file=makefile)
print("var3=42", file=makefile)
print("var4=$/invalid", file=makefile)
print("var5=dollar$$5", file=makefile)
print("var6=${var3}/lib/python3.5/config-$(VAR2)$(var5)"
"-x86_64-linux-gnu", file=makefile)
vars = sysconfig._parse_makefile(TESTFN)
self.assertEqual(vars, {
'var1': 'ab42',
'VAR2': 'b42',
'var3': 42,
'var4': '$/invalid',
'var5': 'dollar$5',
'var6': '42/lib/python3.5/config-b42dollar$5-x86_64-linux-gnu',
})
def test_main():
run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
test_main()
|
|
#-*- coding:utf-8 -*-
"""
This module contains functions and classes for computing similarities across
a collection of vectors.
"""
#Authors: Marcel Caraciolo <marcel@muricoca.com>
#License: BSD Style
import numpy as np
from scikits.crab.similarities.base import BaseSimilarity
from scikits.crab.metrics.pairwise import loglikehood_coefficient
def find_common_elements(source_preferences, target_preferences):
''' Returns the preferences from both vectors '''
src = dict(source_preferences)
tgt = dict(target_preferences)
inter = np.intersect1d(src.keys(), tgt.keys())
common_preferences = zip(*[(src[item], tgt[item]) for item in inter \
if not np.isnan(src[item]) and not np.isnan(tgt[item])])
if common_preferences:
return np.asarray([common_preferences[0]]), np.asarray([common_preferences[1]])
else:
return np.asarray([[]]), np.asarray([[]])
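# Illustrative sketch (not part of the original module) of what
# find_common_elements returns for two preference vectors given as
# (item, rating) pairs:
#   src = [('a', 2.5), ('b', 3.0)]
#   tgt = [('a', 3.5), ('c', 1.0)]
#   find_common_elements(src, tgt)  ->  (array([[2.5]]), array([[3.5]]))
# Only items present in both vectors are kept, and each side is wrapped in a
# 2-D row vector so the pairwise distance functions can consume it directly.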
###############################################################################
# User Similarity
class UserSimilarity(BaseSimilarity):
'''
    Returns the degree of similarity of two users, based on their preferences.
Implementations of this class define a notion of similarity between two users.
Implementations should return values in the range 0.0 to 1.0, with 1.0 representing
perfect similarity.
Parameters
----------
`model`: DataModel
Defines the data model where data is fetched.
`distance`: Function
Pairwise Function between two vectors.
`num_best`: int
If it is left unspecified, similarity queries return a full list (one
float for every item in the model, including the query item).
If `num_best` is set, queries return `num_best` most similar items, as a
sorted list.
Methods
---------
get_similarity()
Return similarity of the `source_id` to a specific `target_id` in the model.
get_similarities()
Return similarity of the `source_id` to all sources in the model.
Examples
---------
>>> from scikits.crab.models.classes import MatrixPreferenceDataModel
>>> from scikits.crab.metrics.pairwise import cosine_distances
>>> from scikits.crab.similarities.basic_similarities import UserSimilarity
>>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \
'Snakes on a Plane': 3.5, \
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \
'The Night Listener': 3.0}, \
'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 3.5}, \
'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \
'Superman Returns': 3.5, 'The Night Listener': 4.0}, \
'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \
'The Night Listener': 4.5, 'Superman Returns': 4.0, \
'You, Me and Dupree': 2.5}, \
'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 2.0}, \
'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'The Night Listener': 3.0, 'Superman Returns': 5.0, \
'You, Me and Dupree': 3.5}, \
'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \
'Superman Returns':4.0}, \
'Maria Gabriela': {}}
>>> model = MatrixPreferenceDataModel(movies)
>>> similarity = UserSimilarity(model, cosine_distances, 3)
>>> similarity['Marcel Caraciolo'] #doctest: +NORMALIZE_WHITESPACE
[('Marcel Caraciolo', 1.0), ('Sheldom', 0.99127582693458016),
('Lorena Abreu', 0.98658676452792504)]
'''
def __init__(self, model, distance, num_best=None):
BaseSimilarity.__init__(self, model, distance, num_best)
self.cache_similarity = {}
def get_similarity(self, source_id, target_id):
#TODO:
# Repeated evaluation of a pair.
# Can make a cache to save some computation.
#if source_id < target_id:
# t = (source_id, target_id)
#else:
# t = (target_id, source_id)
t = (source_id, target_id)
if t in self.cache_similarity:
return self.cache_similarity[t]
        # has_preference_values() is True: the model stores real ratings
        # rather than a plain Boolean matrix.
if self.model.has_preference_values():
#source_preferences, target_preferences = \
# find_common_elements(source_preferences, target_preferences)
# find_common_elements returns values without keys
d_source_preferences = self.model.dataset[source_id]
d_target_preferences = self.model.dataset[target_id]
intersection = set(d_source_preferences.keys()) & set(d_target_preferences.keys())
            # Build the aligned preference arrays from a generator (the fastest
            # of the three approaches; alternatives kept below for reference).
            # Use a float dtype so fractional ratings (e.g. 2.5) are not truncated.
            source_preferences = np.fromiter((d_source_preferences[key] for key in intersection), float, len(intersection))
            target_preferences = np.fromiter((d_target_preferences[key] for key in intersection), float, len(intersection))
# List comprehension
#source_preferences = np.array([d_source_preferences[key] for key in intersection])
#target_preferences = np.array([d_target_preferences[key] for key in intersection])
# Manual space allocation and look up
#source_preferences = np.zeros(len(intersection))
#target_preferences = np.zeros(len(intersection))
#for (i, key) in enumerate(intersection):
# source_preferences[i] = d_source_preferences[key]
# target_preferences[i] = d_target_preferences[key]
else:
source_preferences = self.model.preferences_from_user(source_id)
target_preferences = self.model.preferences_from_user(target_id)
if source_preferences.ndim == 1 and target_preferences.ndim == 1:
source_preferences = np.asarray([source_preferences])
target_preferences = np.asarray([target_preferences])
        # TODO: special case. The Similarity class should accept `model`
        # as a parameter so that the distance function can adjust to this
        # situation itself.
if self.distance == loglikehood_coefficient:
return self.distance(self.model.items_count(), \
source_preferences, target_preferences) \
if not source_preferences.shape[1] == 0 and \
not target_preferences.shape[1] == 0 else np.array([[np.nan]])
#evaluate the similarity between the two users vectors.
d = self.distance(source_preferences, target_preferences) \
if not source_preferences.shape[1] == 0 \
and not target_preferences.shape[1] == 0 else np.array([[np.nan]])
self.cache_similarity[t] = d
return d
def get_similarities(self, source_id):
        # TODO: cache the full similarity list as well.
        return [(other_id, self.get_similarity(source_id, other_id)) for other_id, v in self.model]
def __iter__(self):
"""
For each object in model, compute the similarity function against all other objects and yield the result.
"""
for source_id, preferences in self.model:
yield source_id, self[source_id]
###############################################################################
# Item Similarity
class ItemSimilarity(BaseSimilarity):
'''
    Returns the degree of similarity of two items, based on the preferences that users have expressed for them.
Implementations of this class define a notion of similarity between two items.
Implementations should return values in the range 0.0 to 1.0, with 1.0 representing
perfect similarity.
Parameters
----------
`model`: DataModel
Defines the data model where data is fetched.
`distance`: Function
Pairwise Function between two vectors.
`num_best`: int
If it is left unspecified, similarity queries return a full list (one
float for every item in the model, including the query item).
If `num_best` is set, queries return `num_best` most similar items, as a
sorted list.
Methods
---------
get_similarity()
Return similarity of the `source_id` to a specific `target_id` in the model.
get_similarities()
Return similarity of the `source_id` to all sources in the model.
Examples
---------
>>> from scikits.crab.models.classes import MatrixPreferenceDataModel
>>> from scikits.crab.metrics.pairwise import cosine_distances
>>> from scikits.crab.similarities.basic_similarities import ItemSimilarity
>>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \
'Snakes on a Plane': 3.5, \
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \
'The Night Listener': 3.0}, \
'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 3.5}, \
'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \
'Superman Returns': 3.5, 'The Night Listener': 4.0}, \
'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \
'The Night Listener': 4.5, 'Superman Returns': 4.0, \
'You, Me and Dupree': 2.5}, \
'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 2.0}, \
'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'The Night Listener': 3.0, 'Superman Returns': 5.0, \
'You, Me and Dupree': 3.5}, \
'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \
'Superman Returns':4.0}, \
'Maria Gabriela': {}}
>>> model = MatrixPreferenceDataModel(movies)
>>> similarity = ItemSimilarity(model, cosine_distances, 3)
>>> similarity['The Night Listener'] #doctest: +NORMALIZE_WHITESPACE
[('The Night Listener', 1.0), ('Lady in the Water', 0.98188311415053031),
('Just My Luck', 0.97489347126452108)]
'''
def __init__(self, model, distance, num_best=None):
BaseSimilarity.__init__(self, model, distance, num_best)
def get_similarity(self, source_id, target_id):
source_preferences = self.model.preferences_for_item(source_id)
target_preferences = self.model.preferences_for_item(target_id)
if self.model.has_preference_values():
source_preferences, target_preferences = \
find_common_elements(source_preferences, target_preferences)
if source_preferences.ndim == 1 and target_preferences.ndim == 1:
source_preferences = np.asarray([source_preferences])
target_preferences = np.asarray([target_preferences])
if self.distance == loglikehood_coefficient:
return self.distance(self.model.items_count(), \
source_preferences, target_preferences) \
if not source_preferences.shape[1] == 0 and \
not target_preferences.shape[1] == 0 else np.array([[np.nan]])
#Evaluate the similarity between the two users vectors.
return self.distance(source_preferences, target_preferences) \
if not source_preferences.shape[1] == 0 and \
not target_preferences.shape[1] == 0 else np.array([[np.nan]])
def get_similarities(self, source_id):
return [(other_id, self.get_similarity(source_id, other_id)) for other_id in self.model.item_ids()]
def __iter__(self):
"""
For each object in model, compute the similarity function against all other objects and yield the result.
"""
for item_id in self.model.item_ids():
yield item_id, self[item_id]
|
|
import collections
import subprocess
from . import protocols
from . import transports
from .coroutines import coroutine, From
from .log import logger
class BaseSubprocessTransport(transports.SubprocessTransport):
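    # Bridges a child process to the event loop: stdin/stdout/stderr that were
    # requested as subprocess.PIPE are wrapped in pipe transports, and the
    # attached SubprocessProtocol is notified of pipe data, pipe closure and
    # process exit.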
def __init__(self, loop, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
super(BaseSubprocessTransport, self).__init__(extra)
self._protocol = protocol
self._loop = loop
self._pid = None
self._pipes = {}
if stdin == subprocess.PIPE:
self._pipes[0] = None
if stdout == subprocess.PIPE:
self._pipes[1] = None
if stderr == subprocess.PIPE:
self._pipes[2] = None
self._pending_calls = collections.deque()
self._finished = False
self._returncode = None
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, bufsize=bufsize, **kwargs)
self._pid = self._proc.pid
self._extra['subprocess'] = self._proc
if self._loop.get_debug():
if isinstance(args, (bytes, str)):
program = args
else:
program = args[0]
logger.debug('process %r created: pid %s',
program, self._pid)
def __repr__(self):
info = [self.__class__.__name__, 'pid=%s' % self._pid]
if self._returncode is not None:
info.append('returncode=%s' % self._returncode)
stdin = self._pipes.get(0)
if stdin is not None:
info.append('stdin=%s' % stdin.pipe)
stdout = self._pipes.get(1)
stderr = self._pipes.get(2)
if stdout is not None and stderr is stdout:
info.append('stdout=stderr=%s' % stdout.pipe)
else:
if stdout is not None:
info.append('stdout=%s' % stdout.pipe)
if stderr is not None:
info.append('stderr=%s' % stderr.pipe)
return '<%s>' % ' '.join(info)
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
raise NotImplementedError
def _make_write_subprocess_pipe_proto(self, fd):
raise NotImplementedError
def _make_read_subprocess_pipe_proto(self, fd):
raise NotImplementedError
def close(self):
for proto in self._pipes.values():
if proto is None:
continue
proto.pipe.close()
if self._returncode is None:
self.terminate()
def get_pid(self):
return self._pid
def get_returncode(self):
return self._returncode
def get_pipe_transport(self, fd):
if fd in self._pipes:
return self._pipes[fd].pipe
else:
return None
def send_signal(self, signal):
self._proc.send_signal(signal)
def terminate(self):
self._proc.terminate()
def kill(self):
self._proc.kill()
def _kill_wait(self):
"""Close pipes, kill the subprocess and read its return status.
Function called when an exception is raised during the creation
of a subprocess.
"""
if self._loop.get_debug():
logger.warning('Exception during subprocess creation, '
'kill the subprocess %r',
self,
exc_info=True)
proc = self._proc
if proc.stdout:
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
if proc.stdin:
proc.stdin.close()
try:
proc.kill()
except ProcessLookupError:
pass
self._returncode = proc.wait()
@coroutine
def _post_init(self):
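        # Connect transports to the child's stdin/stdout/stderr pipes, notify
        # the protocol that the connection is made, and flush any callbacks
        # that were queued while the pipes were still being set up.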
try:
proc = self._proc
loop = self._loop
if proc.stdin is not None:
_, pipe = yield From(loop.connect_write_pipe(
lambda: WriteSubprocessPipeProto(self, 0),
proc.stdin))
self._pipes[0] = pipe
if proc.stdout is not None:
_, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 1),
proc.stdout))
self._pipes[1] = pipe
if proc.stderr is not None:
_, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 2),
proc.stderr))
self._pipes[2] = pipe
assert self._pending_calls is not None
self._loop.call_soon(self._protocol.connection_made, self)
for callback, data in self._pending_calls:
self._loop.call_soon(callback, *data)
self._pending_calls = None
except:
self._kill_wait()
raise
def _call(self, cb, *data):
if self._pending_calls is not None:
self._pending_calls.append((cb, data))
else:
self._loop.call_soon(cb, *data)
def _pipe_connection_lost(self, fd, exc):
self._call(self._protocol.pipe_connection_lost, fd, exc)
self._try_finish()
def _pipe_data_received(self, fd, data):
self._call(self._protocol.pipe_data_received, fd, data)
def _process_exited(self, returncode):
assert returncode is not None, returncode
assert self._returncode is None, self._returncode
if self._loop.get_debug():
logger.info('%r exited with return code %r',
self, returncode)
self._returncode = returncode
self._call(self._protocol.process_exited)
self._try_finish()
def _try_finish(self):
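        # Only report connection_lost once the process has exited and every
        # pipe protocol has been disconnected.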
assert not self._finished
if self._returncode is None:
return
if all(p is not None and p.disconnected
for p in self._pipes.values()):
self._finished = True
self._call(self._call_connection_lost, None)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._proc = None
self._protocol = None
self._loop = None
class WriteSubprocessPipeProto(protocols.BaseProtocol):
def __init__(self, proc, fd):
self.proc = proc
self.fd = fd
self.pipe = None
self.disconnected = False
def connection_made(self, transport):
self.pipe = transport
def __repr__(self):
return ('<%s fd=%s pipe=%r>'
% (self.__class__.__name__, self.fd, self.pipe))
def connection_lost(self, exc):
self.disconnected = True
self.proc._pipe_connection_lost(self.fd, exc)
self.proc = None
def pause_writing(self):
self.proc._protocol.pause_writing()
def resume_writing(self):
self.proc._protocol.resume_writing()
class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
protocols.Protocol):
def data_received(self, data):
self.proc._pipe_data_received(self.fd, data)
|
|
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from contextlib import closing
import os
import socket
from tornado.concurrent import Future
from tornado.netutil import bind_sockets, Resolver
from tornado.queues import Queue
from tornado.tcpclient import TCPClient, _Connector
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import skipIfNoIPv6, unittest, refusing_port, skipIfNonUnix, skipOnTravis
from tornado.gen import TimeoutError
# Fake address families for testing. Used in place of AF_INET
# and AF_INET6 because some installations do not have AF_INET6.
AF1, AF2 = 1, 2
class TestTCPServer(TCPServer):
def __init__(self, family):
super(TestTCPServer, self).__init__()
self.streams = []
self.queue = Queue()
sockets = bind_sockets(None, 'localhost', family)
self.add_sockets(sockets)
self.port = sockets[0].getsockname()[1]
def handle_stream(self, stream, address):
self.streams.append(stream)
self.queue.put(stream)
def stop(self):
super(TestTCPServer, self).stop()
for stream in self.streams:
stream.close()
class TCPClientTest(AsyncTestCase):
def setUp(self):
super(TCPClientTest, self).setUp()
self.server = None
self.client = TCPClient()
def start_server(self, family):
if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
self.skipTest("dual-stack servers often have port conflicts on travis")
self.server = TestTCPServer(family)
return self.server.port
def stop_server(self):
if self.server is not None:
self.server.stop()
self.server = None
def tearDown(self):
self.client.close()
self.stop_server()
super(TCPClientTest, self).tearDown()
def skipIfLocalhostV4(self):
# The port used here doesn't matter, but some systems require it
# to be non-zero if we do not also pass AI_PASSIVE.
Resolver().resolve('localhost', 80, callback=self.stop)
addrinfo = self.wait()
families = set(addr[0] for addr in addrinfo)
if socket.AF_INET6 not in families:
self.skipTest("localhost does not resolve to ipv6")
@gen_test
def do_test_connect(self, family, host, source_ip=None, source_port=None):
port = self.start_server(family)
stream = yield self.client.connect(host, port,
source_ip=source_ip,
source_port=source_port)
server_stream = yield self.server.queue.get()
with closing(stream):
stream.write(b"hello")
data = yield server_stream.read_bytes(5)
self.assertEqual(data, b"hello")
def test_connect_ipv4_ipv4(self):
self.do_test_connect(socket.AF_INET, '127.0.0.1')
def test_connect_ipv4_dual(self):
self.do_test_connect(socket.AF_INET, 'localhost')
@skipIfNoIPv6
def test_connect_ipv6_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_INET6, '::1')
@skipIfNoIPv6
def test_connect_ipv6_dual(self):
self.skipIfLocalhostV4()
if Resolver.configured_class().__name__.endswith('TwistedResolver'):
self.skipTest('TwistedResolver does not support multiple addresses')
self.do_test_connect(socket.AF_INET6, 'localhost')
def test_connect_unspec_ipv4(self):
self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')
@skipIfNoIPv6
def test_connect_unspec_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_UNSPEC, '::1')
def test_connect_unspec_dual(self):
self.do_test_connect(socket.AF_UNSPEC, 'localhost')
@gen_test
def test_refused_ipv4(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with self.assertRaises(IOError):
yield self.client.connect('127.0.0.1', port)
def test_source_ip_fail(self):
'''
Fail when trying to use the source IP Address '8.8.8.8'.
'''
self.assertRaises(socket.error,
self.do_test_connect,
socket.AF_INET,
'127.0.0.1',
source_ip='8.8.8.8')
def test_source_ip_success(self):
'''
Success when trying to use the source IP Address '127.0.0.1'
'''
self.do_test_connect(socket.AF_INET, '127.0.0.1', source_ip='127.0.0.1')
@skipIfNonUnix
def test_source_port_fail(self):
'''
Fail when trying to use source port 1.
'''
self.assertRaises(socket.error,
self.do_test_connect,
socket.AF_INET,
'127.0.0.1',
source_port=1)
@gen_test
def test_connect_timeout(self):
timeout = 0.05
class TimeoutResolver(Resolver):
def resolve(self, *args, **kwargs):
return Future() # never completes
with self.assertRaises(TimeoutError):
yield TCPClient(resolver=TimeoutResolver()).connect(
'1.2.3.4', 12345, timeout=timeout)
class TestConnectorSplit(unittest.TestCase):
def test_one_family(self):
# These addresses aren't in the right format, but split doesn't care.
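        # _Connector.split partitions addrinfo by address family: entries that
        # share the first entry's family become the primary list, everything
        # else the secondary list (used for Happy Eyeballs style connects).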
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(primary, [(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(secondary, [])
def test_mixed(self):
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF2, 'b'),
(AF1, 'c'),
(AF2, 'd')])
self.assertEqual(primary, [(AF1, 'a'), (AF1, 'c')])
self.assertEqual(secondary, [(AF2, 'b'), (AF2, 'd')])
class ConnectorTest(AsyncTestCase):
class FakeStream(object):
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def setUp(self):
super(ConnectorTest, self).setUp()
self.connect_futures = {}
self.streams = {}
self.addrinfo = [(AF1, 'a'), (AF1, 'b'),
(AF2, 'c'), (AF2, 'd')]
def tearDown(self):
# Unless explicitly checked (and popped) in the test, we shouldn't
# be closing any streams
for stream in self.streams.values():
self.assertFalse(stream.closed)
super(ConnectorTest, self).tearDown()
def create_stream(self, af, addr):
stream = ConnectorTest.FakeStream()
self.streams[addr] = stream
future = Future()
self.connect_futures[(af, addr)] = future
return stream, future
def assert_pending(self, *keys):
self.assertEqual(sorted(self.connect_futures.keys()), sorted(keys))
def resolve_connect(self, af, addr, success):
future = self.connect_futures.pop((af, addr))
if success:
future.set_result(self.streams[addr])
else:
self.streams.pop(addr)
future.set_exception(IOError())
# Run the loop to allow callbacks to be run.
self.io_loop.add_callback(self.stop)
self.wait()
def assert_connector_streams_closed(self, conn):
for stream in conn.streams:
self.assertTrue(stream.closed)
def start_connect(self, addrinfo):
conn = _Connector(addrinfo, self.create_stream)
# Give it a huge timeout; we'll trigger timeouts manually.
future = conn.start(3600, connect_timeout=self.io_loop.time() + 3600)
return conn, future
def test_immediate_success(self):
conn, future = self.start_connect(self.addrinfo)
self.assertEqual(list(self.connect_futures.keys()),
[(AF1, 'a')])
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
def test_immediate_failure(self):
# Fail with just one address.
conn, future = self.start_connect([(AF1, 'a')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_one_family_second_try_failure(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
# trigger the timeout while the first lookup is pending;
# nothing happens.
conn.on_timeout()
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_two_families_immediate_failure(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'), (AF2, 'c'))
self.resolve_connect(AF1, 'b', False)
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
def test_two_families_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
# resolving 'a' after the connection has completed doesn't start 'b'
self.resolve_connect(AF1, 'a', False)
self.assert_pending()
def test_success_after_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
# resolving 'c' after completion closes the connection.
self.resolve_connect(AF2, 'c', True)
self.assertTrue(self.streams.pop('c').closed)
def test_all_fail(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', False)
self.assert_pending((AF1, 'a'), (AF2, 'd'))
self.resolve_connect(AF2, 'd', False)
# one queue is now empty
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.assertFalse(future.done())
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
def test_one_family_timeout_after_connect_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
conn.on_connect_timeout()
        # the connector closes all streams on connect timeout, so we
        # explicitly pop the connect_future ourselves.
self.connect_futures.pop((AF1, 'a'))
self.assertTrue(self.streams.pop('a').closed)
conn.on_timeout()
        # once the future is set with TimeoutError, the connector will not
        # move on to the next possible address.
self.assert_pending()
self.assertEqual(len(conn.streams), 1)
self.assert_connector_streams_closed(conn)
self.assertRaises(TimeoutError, future.result)
def test_one_family_success_before_connect_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', True)
conn.on_connect_timeout()
self.assert_pending()
self.assertEqual(self.streams['a'].closed, False)
        # the successful stream has been popped from conn.streams
self.assertEqual(len(conn.streams), 0)
# streams in connector should be closed after connect timeout
self.assert_connector_streams_closed(conn)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
def test_one_family_second_try_after_connect_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
conn.on_connect_timeout()
self.connect_futures.pop((AF1, 'b'))
self.assertTrue(self.streams.pop('b').closed)
self.assert_pending()
self.assertEqual(len(conn.streams), 2)
self.assert_connector_streams_closed(conn)
self.assertRaises(TimeoutError, future.result)
def test_one_family_second_try_failure_before_connect_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', False)
conn.on_connect_timeout()
self.assert_pending()
self.assertEqual(len(conn.streams), 2)
self.assert_connector_streams_closed(conn)
self.assertRaises(IOError, future.result)
def test_two_family_timeout_before_connect_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
conn.on_connect_timeout()
self.connect_futures.pop((AF1, 'a'))
self.assertTrue(self.streams.pop('a').closed)
self.connect_futures.pop((AF2, 'c'))
self.assertTrue(self.streams.pop('c').closed)
self.assert_pending()
self.assertEqual(len(conn.streams), 2)
self.assert_connector_streams_closed(conn)
self.assertRaises(TimeoutError, future.result)
def test_two_family_success_after_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF1, 'a', True)
        # if one of the streams succeeds, the connector closes all the other streams
self.connect_futures.pop((AF2, 'c'))
self.assertTrue(self.streams.pop('c').closed)
self.assert_pending()
self.assertEqual(len(conn.streams), 1)
self.assert_connector_streams_closed(conn)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
def test_two_family_timeout_after_connect_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_connect_timeout()
self.connect_futures.pop((AF1, 'a'))
self.assertTrue(self.streams.pop('a').closed)
self.assert_pending()
conn.on_timeout()
        # once the future is set with TimeoutError, the connector will not
        # try the secondary addresses.
self.assert_pending()
self.assertEqual(len(conn.streams), 1)
self.assert_connector_streams_closed(conn)
self.assertRaises(TimeoutError, future.result)
|
|
import os
import cv2
import time
import pickle
import datetime
import matplotlib.pyplot as plt
from math import floor, sqrt
from mpl_toolkits.mplot3d import Axes3D
from tensorflow.python.framework import graph_io
from tensorflow.python.tools import freeze_graph
from NN.TF.Layers import *
from Util.Util import Util, VisUtil
from Util.Bases import TFClassifierBase
from Util.ProgressBar import ProgressBar
# TODO: Saving NNPipe; Add 'idx' param to 'get_rs' method
class NNVerbose:
NONE = 0
EPOCH = 1
ITER = 1.5
METRICS = 2
METRICS_DETAIL = 3
DETAIL = 4
DEBUG = 5
class NNConfig:
BOOST_LESS_SAMPLES = False
TRAINING_SCALE = 5 / 6
BATCH_SIZE = 1e6
# Neural Network
class NNBase(TFClassifierBase):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NNBase, self).__init__(**kwargs)
self._layers = []
self._optimizer = None
self._w_stds, self._b_inits = [], []
self._layer_names, self._layer_params = [], []
self._lr = 0
self.verbose = 1
self._current_dimension = 0
self._logs = {}
self._metrics, self._metric_names, self._metric_rs = [], [], []
self._loaded = False
self._x_min = self._x_max = self._y_min = self._y_max = 0
self._transferred_flags = {"train": False, "test": False}
self._activations = None
self._loss = self._train_step = None
self._layer_factory = LayerFactory()
self._tf_weights, self._tf_bias = [], []
@property
def name(self):
return (
"-".join([str(_layer.shape[1]) for _layer in self._layers]) +
" at {}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
)
@NNTiming.timeit(level=4)
def _get_w(self, shape):
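        # When no explicit std is supplied, fall back to Glorot-style
        # initialisation: stddev = sqrt(2 / (fan_in + fan_out)).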
if self._w_stds[-1] is None:
self._w_stds[-1] = sqrt(2 / sum(shape))
initial = tf.truncated_normal(shape, stddev=self._w_stds[-1])
return tf.Variable(initial, name="w")
@NNTiming.timeit(level=4)
def _get_b(self, shape):
return tf.Variable(np.zeros(shape, dtype=np.float32) + self._b_inits[-1], name="b")
@NNTiming.timeit(level=4)
def _get_tb_name(self, layer):
return "{}_{}".format(layer.position, layer.name)
@staticmethod
@NNTiming.timeit(level=4)
def _summary_var(var):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(var)
tf.summary.scalar("mean", mean)
with tf.name_scope("std"):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar("std", stddev)
# noinspection PyTypeChecker
@NNTiming.timeit(level=4)
def _add_params(self, layer, shape, conv_channel=None, fc_shape=None, apply_bias=True):
if fc_shape is not None:
w_shape = (fc_shape, shape[1])
b_shape = shape[1],
elif conv_channel is not None:
if len(shape[1]) <= 2:
w_shape = shape[1][0], shape[1][1], conv_channel, conv_channel
else:
w_shape = (shape[1][1], shape[1][2], conv_channel, shape[1][0])
b_shape = shape[1][0],
else:
w_shape = shape
b_shape = shape[1],
_new_w = self._get_w(w_shape)
_new_b = self._get_b(b_shape) if apply_bias else None
self._tf_weights.append(_new_w)
if apply_bias:
self._tf_bias.append(_new_b)
else:
self._tf_bias.append(None)
with tf.name_scope(self._get_tb_name(layer)):
with tf.name_scope("weight"):
NNBase._summary_var(_new_w)
if layer.apply_bias:
with tf.name_scope("bias"):
NNBase._summary_var(_new_b)
@NNTiming.timeit(level=4)
def _add_param_placeholder(self):
self._tf_weights.append(tf.constant([.0]))
self._tf_bias.append(tf.constant([.0]))
@NNTiming.timeit(level=4)
def _add_layer(self, layer, *args, **kwargs):
if not self._layers and isinstance(layer, str):
if layer.lower() == "pipe":
self._layers.append(NNPipe(args[0]))
self._add_param_placeholder()
return
_layer = self._layer_factory.get_root_layer_by_name(layer, *args, **kwargs)
if _layer:
self.add(_layer, pop_last_init=True)
return
_parent = self._layers[-1]
if isinstance(_parent, CostLayer):
raise BuildLayerError("Adding layer after CostLayer is not permitted")
if isinstance(_parent, NNPipe):
self._current_dimension = _parent.shape[1]
if isinstance(layer, str):
if layer.lower() == "pipe":
self._layers.append(NNPipe(args[0]))
self._add_param_placeholder()
return
layer, shape = self._layer_factory.get_layer_by_name(
layer, _parent, self._current_dimension, *args, **kwargs
)
if shape is None:
self.add(layer, pop_last_init=True)
return
_current, _next = shape
else:
_current, _next = args
if isinstance(layer, SubLayer):
if _current != _parent.shape[1]:
raise BuildLayerError("Output shape should be identical with input shape "
"if chosen SubLayer is not a CostLayer")
self.parent = _parent
self._layers.append(layer)
self._add_param_placeholder()
self._current_dimension = _next
else:
fc_shape, conv_channel, last_layer = None, None, self._layers[-1]
if NNBase._is_conv(last_layer):
if NNBase._is_conv(layer):
conv_channel = last_layer.n_filters
_current = (conv_channel, last_layer.out_h, last_layer.out_w)
layer.feed_shape((_current, _next))
else:
layer.is_fc = True
fc_shape = last_layer.out_h * last_layer.out_w * last_layer.n_filters
self._layers.append(layer)
if isinstance(layer, ConvPoolLayer):
self._add_param_placeholder()
else:
self._add_params(layer, (_current, _next), conv_channel, fc_shape)
self._current_dimension = _next
self._update_layer_information(layer)
@NNTiming.timeit(level=4)
def _update_layer_information(self, layer):
self._layer_params.append(layer.params)
if len(self._layer_params) > 1 and not layer.is_sub_layer:
self._layer_params[-1] = ((self._layer_params[-1][0][1],), *self._layer_params[-1][1:])
@staticmethod
@NNTiming.timeit(level=4)
def _is_conv(layer):
return isinstance(layer, ConvLayer) or isinstance(layer, NNPipe)
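    # get_rs chains every layer's forward pass into one TF graph. The last layer
    # is special-cased: when pipe=False its raw affine output (matmul + optional
    # bias) is returned so the CostLayer can work on logits directly, while
    # inside an NNPipe the last layer is activated normally.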
@NNTiming.timeit(level=1, prefix="[API] ")
def get_rs(self, x, predict=True, pipe=False):
if not isinstance(self._layers[0], NNPipe):
cache = self._layers[0].activate(x, self._tf_weights[0], self._tf_bias[0], predict)
else:
cache = self._layers[0].get_rs(x, predict)
for i, layer in enumerate(self._layers[1:]):
if i == len(self._layers) - 2:
if not pipe:
if NNDist._is_conv(self._layers[i]):
fc_shape = np.prod(cache.get_shape()[1:]) # type: int
cache = tf.reshape(cache, [-1, int(fc_shape)])
if self._tf_bias[-1] is not None:
return tf.matmul(cache, self._tf_weights[-1]) + self._tf_bias[-1]
return tf.matmul(cache, self._tf_weights[-1])
else:
if not isinstance(layer, NNPipe):
return layer.activate(cache, self._tf_weights[i + 1], self._tf_bias[i + 1], predict)
return layer.get_rs(cache, predict)
if not isinstance(layer, NNPipe):
cache = layer.activate(cache, self._tf_weights[i + 1], self._tf_bias[i + 1], predict)
else:
cache = layer.get_rs(cache, predict)
@NNTiming.timeit(level=4, prefix="[API] ")
def add(self, layer, *args, **kwargs):
# Init kwargs
kwargs["apply_bias"] = kwargs.get("apply_bias", True)
kwargs["position"] = kwargs.get("position", len(self._layers) + 1)
self._w_stds.append(kwargs.pop("w_std", None))
self._b_inits.append(kwargs.pop("b_init", 0.1))
if kwargs.pop("pop_last_init", False):
self._w_stds.pop()
self._b_inits.pop()
if isinstance(layer, str):
# noinspection PyTypeChecker
self._add_layer(layer, *args, **kwargs)
else:
if not isinstance(layer, Layer):
raise BuildLayerError("Invalid Layer provided (should be subclass of Layer)")
if not self._layers:
if isinstance(layer, SubLayer):
raise BuildLayerError("Invalid Layer provided (first layer should not be subclass of SubLayer)")
if len(layer.shape) != 2:
raise BuildLayerError("Invalid input Layer provided (shape should be {}, {} found)".format(
2, len(layer.shape)
))
self._layers, self._current_dimension = [layer], layer.shape[1]
self._update_layer_information(layer)
if isinstance(layer, ConvLayer):
self._add_params(layer, layer.shape, layer.n_channels)
else:
self._add_params(layer, layer.shape)
else:
if len(layer.shape) > 2:
raise BuildLayerError("Invalid Layer provided (shape should be {}, {} found)".format(
2, len(layer.shape)
))
if len(layer.shape) == 2:
_current, _next = layer.shape
if isinstance(layer, SubLayer):
if _next != self._current_dimension:
raise BuildLayerError("Invalid SubLayer provided (shape[1] should be {}, {} found)".format(
self._current_dimension, _next
))
elif not NNDist._is_conv(layer) and _current != self._current_dimension:
raise BuildLayerError("Invalid Layer provided (shape[0] should be {}, {} found)".format(
self._current_dimension, _current
))
self._add_layer(layer, _current, _next)
elif len(layer.shape) == 1:
_next = layer.shape[0]
layer.shape = (self._current_dimension, _next)
self._add_layer(layer, self._current_dimension, _next)
else:
raise LayerError("Invalid Layer provided (invalid shape '{}' found)".format(layer.shape))
@NNTiming.timeit(level=4, prefix="[API] ")
def add_pipe_layer(self, idx, layer, shape=None, *args, **kwargs):
_last_layer = self._layers[-1]
if len(self._layers) == 1:
_last_parent = None
else:
_last_parent = self._layers[-2]
if not isinstance(_last_layer, NNPipe):
raise BuildLayerError("Adding pipe layers to a non-NNPipe object is not allowed")
if not _last_layer.initialized[idx] and len(shape) == 1:
if _last_parent is None:
raise BuildLayerError("Adding pipe layers at first without input shape is not allowed")
_dim = (_last_parent.n_filters, _last_parent.out_h, _last_parent.out_w)
shape = (_dim, shape[0])
_last_layer.add(idx, layer, shape, *args, **kwargs)
@NNTiming.timeit(level=4, prefix="[API] ")
def preview(self, verbose=0):
if not self._layers:
rs = "None"
else:
rs = (
"Input : {:<16s} - {}\n".format("Dimension", self._layers[0].shape[0]) +
"\n".join([_layer.info for _layer in self._layers]))
print("=" * 30 + "\n" + "Structure\n" + "-" * 30 + "\n" + rs + "\n" + "-" * 30)
if verbose >= 1:
print("Initial Values\n" + "-" * 30)
print("\n".join(["({:^16s}) w_std: {:8.6} ; b_init: {:8.6}".format(
batch[0].name, float(batch[1]), float(batch[2])) if not isinstance(
batch[0], NNPipe) else "({:^16s}) ({:^3d})".format(
"Pipe", len(batch[0]["nn_lst"])
) for batch in zip(self._layers, self._w_stds, self._b_inits) if not isinstance(
batch[0], SubLayer) and not isinstance(
batch[0], CostLayer) and not isinstance(
batch[0], ConvPoolLayer)])
)
if verbose >= 2:
for layer in self._layers:
if isinstance(layer, NNPipe):
layer.preview()
print("-" * 30)
class NNDist(NNBase):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NNDist, self).__init__(**kwargs)
self._sess = tf.Session()
self._optimizer_factory = OptFactory()
self._available_metrics = {
"acc": self.acc, "_acc": self.acc,
"f1": self.f1_score, "_f1_score": self.f1_score
}
@NNTiming.timeit(level=4, prefix="[Initialize] ")
def initialize(self):
self._layers = []
self._optimizer = None
self._w_stds, self._b_inits = [], []
self._layer_names, self._layer_params = [], []
self._lr = 0
self.verbose = 1
self._current_dimension = 0
self._logs = {}
self._metrics, self._metric_names, self._metric_rs = [], [], []
self._loaded = False
self._x_min = self._x_max = self._y_min = self._y_max = 0
self._transferred_flags = {"train": False, "test": False}
self._activations = None
self._loss = self._train_step = None
self._layer_factory = LayerFactory()
self._tf_weights, self._tf_bias = [], []
self._sess = tf.Session()
# Property
@property
def layer_names(self):
return [layer.name for layer in self._layers]
@layer_names.setter
def layer_names(self, value):
self._layer_names = value
@property
def layer_special_params(self):
return [layer.get_special_params(self._sess) for layer in self._layers]
@layer_special_params.setter
def layer_special_params(self, value):
for layer, sp_param in zip(self._layers, value):
if sp_param is not None:
layer.set_special_params(sp_param)
@property
def optimizer(self):
return self._optimizer.name
@optimizer.setter
def optimizer(self, value):
self._optimizer = value
# Utils
@staticmethod
@NNTiming.timeit(level=4, prefix="[Private StaticMethod] ")
def _transfer_x(x):
if len(x.shape) == 1:
x = x.reshape(1, -1)
if len(x.shape) == 4:
x = x.transpose(0, 2, 3, 1)
return x.astype(np.float32)
@NNTiming.timeit(level=4)
def _feed_data(self, x, y):
if not self._transferred_flags["train"]:
x = NNDist._transfer_x(x)
self._transferred_flags["train"] = True
y = np.asarray(y, dtype=np.float32)
if len(x) != len(y):
raise BuildNetworkError("Data fed to network should be identical in length, x: {} and y: {} found".format(
len(x), len(y)
))
self._x_min, self._x_max = np.min(x), np.max(x)
self._y_min, self._y_max = np.min(y), np.max(y)
return x, y
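    # _get_prediction evaluates the prediction op in chunks; the per-run sample
    # count is chosen so that (samples * flattened sample size) stays close to
    # NNConfig.BATCH_SIZE, keeping the size of each feed roughly constant.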
@NNTiming.timeit(level=2)
def _get_prediction(self, x, name=None, verbose=None):
if verbose is None:
verbose = self.verbose
fc_shape = np.prod(x.shape[1:]) # type: int
single_batch = int(NNConfig.BATCH_SIZE / fc_shape)
        if not single_batch:
            single_batch = 1
if single_batch >= len(x):
return self._sess.run(self._y_pred, {self._tfx: x})
        epoch = int(len(x) / single_batch)
        if len(x) % single_batch:
            # round up so the progress bar also covers the final partial batch
            epoch += 1
name = "Prediction" if name is None else "Prediction ({})".format(name)
sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
if verbose >= NNVerbose.METRICS:
sub_bar.start()
rs, count = [], 0
while count < len(x):
count += single_batch
if count >= len(x):
rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:]}))
else:
rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:count]}))
if verbose >= NNVerbose.METRICS:
sub_bar.update()
return np.vstack(rs)
@NNTiming.timeit(level=4)
def _get_activations(self, x, predict=False):
if not isinstance(self._layers[0], NNPipe):
activations = [self._layers[0].activate(x, self._tf_weights[0], self._tf_bias[0], predict)]
else:
activations = [self._layers[0].get_rs(x, predict)]
for i, layer in enumerate(self._layers[1:]):
if i == len(self._layers) - 2:
if NNDist._is_conv(self._layers[i]):
fc_shape = np.prod(activations[-1].get_shape()[1:]) # type: int
activations[-1] = tf.reshape(activations[-1], [-1, int(fc_shape)])
if self._tf_bias[-1] is not None:
activations.append(tf.matmul(activations[-1], self._tf_weights[-1]) + self._tf_bias[-1])
else:
activations.append(tf.matmul(activations[-1], self._tf_weights[-1]))
else:
if not isinstance(layer, NNPipe):
activations.append(layer.activate(
activations[-1], self._tf_weights[i + 1], self._tf_bias[i + 1], predict))
else:
activations.append(layer.get_rs(activations[-1], predict))
return activations
@NNTiming.timeit(level=1)
def _get_l2_losses(self, lb):
if lb <= 0:
return 0.
return [lb * tf.nn.l2_loss(w) for l, w in zip(self._layers, self._tf_weights)
if not isinstance(l, SubLayer) and not isinstance(l, ConvPoolLayer)]
@NNTiming.timeit(level=1)
def _get_acts(self, x):
with self._sess.as_default():
activations = [_ac.eval() for _ac in self._get_activations(x, True)]
return activations
@NNTiming.timeit(level=3)
def _append_log(self, x, y, y_pred, name, get_loss):
if y_pred is None:
y_pred = self._get_prediction(x, name)
for i, metric_rs in enumerate(self._metric_rs):
self._logs[name][i].append(metric_rs.eval({
self._tfy: y, self._y_pred: y_pred
}))
if get_loss:
self._logs[name][-1].append(
self._loss.eval({self._tfy: y, self._y_pred: y_pred})
)
@NNTiming.timeit(level=3)
def _print_metric_logs(self, name, show_loss):
print()
print("=" * 47)
for i, metric in enumerate(self._metric_names):
print("{:<16s} {:<16s}: {:12.8}".format(
name, metric, self._logs[name][i][-1]))
if show_loss:
print("{:<16s} {:<16s}: {:12.8}".format(
name, "loss", self._logs[name][-1][-1]))
print("=" * 47)
@NNTiming.timeit(level=1)
def _draw_2d_network(self, radius=6, width=1200, height=800, padding=0.2,
plot_scale=2, plot_precision=0.03,
sub_layer_height_scale=0, **kwargs):
if not kwargs["show"] and not kwargs["mp4"]:
return
layers = len(self._layers) + 1
units = [layer.shape[0] for layer in self._layers] + [self._layers[-1].shape[1]]
whether_sub_layers = np.array([False] + [isinstance(layer, SubLayer) for layer in self._layers])
n_sub_layers = np.sum(whether_sub_layers) # type: int
plot_num = int(1 / plot_precision)
if plot_num % 2 == 1:
plot_num += 1
half_plot_num = int(plot_num * 0.5)
xf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)
yf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num) * -1
input_x, input_y = np.meshgrid(xf, yf)
input_xs = np.c_[input_x.ravel().astype(np.float32), input_y.ravel().astype(np.float32)]
activations = self._sess.run(self._activations, {self._tfx: input_xs})
activations = [activation.T.reshape(units[i + 1], plot_num, plot_num)
for i, activation in enumerate(activations)]
graphs = []
for j, activation in enumerate(activations):
graph_group = []
if j == len(activations) - 1:
classes = np.argmax(activation, axis=0)
else:
classes = None
for k, ac in enumerate(activation):
data = np.zeros((plot_num, plot_num, 3), np.uint8)
if j != len(activations) - 1:
mask = ac >= np.average(ac)
else:
mask = classes == k
data[mask], data[~mask] = [0, 165, 255], [255, 165, 0]
graph_group.append(data)
graphs.append(graph_group)
img = np.full([height, width, 3], 255, dtype=np.uint8)
axis0_padding = int(height / (layers - 1 + 2 * padding)) * padding + plot_num
axis0_step = (height - 2 * axis0_padding) / layers
sub_layer_decrease = int((1 - sub_layer_height_scale) * axis0_step)
axis0 = np.linspace(
axis0_padding,
height + n_sub_layers * sub_layer_decrease - axis0_padding,
layers, dtype=np.int)
axis0 -= sub_layer_decrease * np.cumsum(whether_sub_layers)
axis1_padding = plot_num
axis1 = [np.linspace(axis1_padding, width - axis1_padding, unit + 2, dtype=np.int)
for unit in units]
axis1 = [axis[1:-1] for axis in axis1]
colors, thicknesses, masks = [], [], []
for weight in self._tf_weights:
line_info = VisUtil.get_line_info(weight.eval())
colors.append(line_info[0])
thicknesses.append(line_info[1])
masks.append(line_info[2])
for i, (y, xs) in enumerate(zip(axis0, axis1)):
for j, x in enumerate(xs):
if i == 0:
cv2.circle(img, (x, y), radius, (20, 215, 20), int(radius / 2))
else:
graph = graphs[i - 1][j]
img[y - half_plot_num:y + half_plot_num, x - half_plot_num:x + half_plot_num] = graph
if i > 0:
                cv2.putText(img, self._layers[i - 1].name, (12, y - 36),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1, cv2.LINE_AA)
for i, y in enumerate(axis0):
if i == len(axis0) - 1:
break
for j, x in enumerate(axis1[i]):
new_y = axis0[i + 1]
whether_sub_layer = isinstance(self._layers[i], SubLayer)
for k, new_x in enumerate(axis1[i + 1]):
if whether_sub_layer and j != k:
continue
if masks[i][j][k]:
cv2.line(img, (x, y + half_plot_num), (new_x, new_y - half_plot_num),
colors[i][j][k], thicknesses[i][j][k])
return img
# Init
@NNTiming.timeit(level=4)
def _init_optimizer(self, optimizer=None):
if optimizer is None:
if isinstance(self._optimizer, str):
optimizer = self._optimizer
else:
if self._optimizer is None:
self._optimizer = Adam(self._lr)
if isinstance(self._optimizer, Optimizer):
return
raise BuildNetworkError("Invalid optimizer '{}' provided".format(self._optimizer))
if isinstance(optimizer, str):
self._optimizer = self._optimizer_factory.get_optimizer_by_name(optimizer, self._lr)
elif isinstance(optimizer, Optimizer):
self._optimizer = optimizer
else:
raise BuildNetworkError("Invalid optimizer '{}' provided".format(optimizer))
@NNTiming.timeit(level=4)
def _init_layers(self):
for _layer in self._layers:
_layer.init(self._sess)
@NNTiming.timeit(level=4)
def _init_structure(self, verbose):
x_shape = self._layers[0].shape[0]
if isinstance(x_shape, int):
x_shape = x_shape,
y_shape = self._layers[-1].shape[1]
x_placeholder, y_placeholder = np.zeros((1, *x_shape)), np.zeros((1, y_shape))
self.fit(x_placeholder, y_placeholder, epoch=0, train_only=True, verbose=verbose)
self._transferred_flags["train"] = False
@NNTiming.timeit(level=4)
def _init_train_step(self, sess):
if not self._loaded:
self._train_step = self._optimizer.minimize(self._loss)
sess.run(tf.global_variables_initializer())
else:
_var_cache = set(tf.global_variables())
self._train_step = self._optimizer.minimize(self._loss)
sess.run(tf.variables_initializer(set(tf.global_variables()) - _var_cache))
# Batch Work
@NNTiming.timeit(level=2)
def _batch_work(self, i, bar, counter, x_train, y_train, x_test, y_test, show_loss, condition,
tensorboard_verbose, train_repeat, sess, train_merge_op, test_merge_op,
train_writer, test_writer):
if tensorboard_verbose > 0:
count = counter * train_repeat + i
y_train_pred = self.predict(x_train, get_raw_results=True, transfer_x=False)
y_test_pred = self.predict(x_test, get_raw_results=True, transfer_x=False)
train_summary = sess.run(train_merge_op, feed_dict={
self._tfy: y_train, self._y_pred: y_train_pred
})
test_summary = sess.run(test_merge_op, feed_dict={
self._tfy: y_test, self._y_pred: y_test_pred
})
train_writer.add_summary(train_summary, count)
test_writer.add_summary(test_summary, count)
else:
y_train_pred = y_test_pred = None
if bar is not None:
condition = bar.update() and condition
if condition:
self._append_log(x_train, y_train, y_train_pred, "Train", show_loss)
self._append_log(x_test, y_test, y_test_pred, "Test", show_loss)
self._print_metric_logs("Train", show_loss)
self._print_metric_logs("Test", show_loss)
# API
@NNTiming.timeit(level=4, prefix="[API] ")
def get_current_pipe(self, idx):
_last_layer = self._layers[-1]
if not isinstance(_last_layer, NNPipe):
return
return _last_layer["nn_lst"][idx]
@NNTiming.timeit(level=4, prefix="[API] ")
def build(self, units="load"):
if isinstance(units, str):
if units == "load":
for name, param in zip(self._layer_names, self._layer_params):
self.add(name, *param)
else:
raise NotImplementedError("Invalid param '{}' provided to 'build' method".format(units))
else:
try:
units = np.asarray(units).flatten().astype(np.int)
except ValueError as err:
raise BuildLayerError(err)
if len(units) < 2:
raise BuildLayerError("At least 2 layers are needed")
_input_shape = (units[0], units[1])
self.initialize()
self.add("ReLU", _input_shape)
for unit_num in units[2:-1]:
self.add("ReLU", (unit_num,))
self.add("CrossEntropy", (units[-1],))
@NNTiming.timeit(level=4, prefix="[API] ")
def split_data(self, x, y, x_test, y_test,
train_only, training_scale=NNConfig.TRAINING_SCALE):
if train_only:
if x_test is not None and y_test is not None:
if not self._transferred_flags["test"]:
x, y = np.vstack((x, NNDist._transfer_x(np.asarray(x_test)))), np.vstack((y, y_test))
self._transferred_flags["test"] = True
x_train = x_test = x.astype(np.float32)
y_train = y_test = y.astype(np.float32)
else:
shuffle_suffix = np.random.permutation(len(x))
x, y = x[shuffle_suffix], y[shuffle_suffix]
if x_test is None or y_test is None:
train_len = int(len(x) * training_scale)
x_train, y_train = x[:train_len], y[:train_len]
x_test, y_test = x[train_len:], y[train_len:]
else:
x_train, y_train = x, y
if not self._transferred_flags["test"]:
x_test, y_test = NNDist._transfer_x(np.asarray(x_test)), np.asarray(y_test, dtype=np.float32)
self._transferred_flags["test"] = True
if NNConfig.BOOST_LESS_SAMPLES:
if y_train.shape[1] != 2:
raise BuildNetworkError("It is not permitted to boost less samples in multiple classification")
y_train_arg = np.argmax(y_train, axis=1)
y0 = y_train_arg == 0
y1 = ~y0
y_len, y0_len = len(y_train), np.sum(y0) # type: int
if y0_len > int(0.5 * y_len):
y0, y1 = y1, y0
y0_len = y_len - y0_len
boost_suffix = np.random.randint(y0_len, size=y_len - y0_len)
x_train = np.vstack((x_train[y1], x_train[y0][boost_suffix]))
y_train = np.vstack((y_train[y1], y_train[y0][boost_suffix]))
shuffle_suffix = np.random.permutation(len(x_train))
x_train, y_train = x_train[shuffle_suffix], y_train[shuffle_suffix]
return (x_train, x_test), (y_train, y_test)
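    # split_data shuffles and splits the data (or merges the provided test set
    # into the training set when train_only=True). With NNConfig.BOOST_LESS_SAMPLES
    # it additionally oversamples the minority class by random duplication, which
    # is only supported for binary (two-column one-hot) labels.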
@NNTiming.timeit(level=1, prefix="[API] ")
def fit(self,
x, y, x_test=None, y_test=None,
lr=0.001, lb=0.001, epoch=10, weight_scale=1,
batch_size=128, record_period=1, train_only=False, optimizer=None,
show_loss=True, metrics=None, do_log=False, verbose=None,
tensorboard_verbose=0, animation_params=None):
x, y = self._feed_data(x, y)
self._lr = lr
self._init_optimizer(optimizer)
print("Optimizer: ", self._optimizer.name)
print("-" * 30)
if not self._layers:
raise BuildNetworkError("Please provide layers before fitting data")
if y.shape[1] != self._current_dimension:
raise BuildNetworkError("Output layer's shape should be {}, {} found".format(
self._current_dimension, y.shape[1]))
(x_train, x_test), (y_train, y_test) = self.split_data(x, y, x_test, y_test, train_only)
train_len, test_len = len(x_train), len(x_test)
batch_size = min(batch_size, train_len)
do_random_batch = train_len > batch_size
train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
with tf.name_scope("Entry"):
self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])
if epoch <= 0:
return
self._metrics = ["acc"] if metrics is None else metrics
for i, metric in enumerate(self._metrics):
if isinstance(metric, str):
if metric not in self._available_metrics:
raise BuildNetworkError("Metric '{}' is not implemented".format(metric))
self._metrics[i] = self._available_metrics[metric]
self._metric_names = [_m.__name__ for _m in self._metrics]
self._logs = {
name: [[] for _ in range(len(self._metrics) + 1)] for name in ("Train", "Test")
}
if verbose is not None:
self.verbose = verbose
bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
if self.verbose >= NNVerbose.EPOCH:
bar.start()
img = None
*animation_properties, animation_params = self._get_animation_params(animation_params)
with self._sess.as_default() as sess:
with tf.name_scope("ActivationFlow"):
self._activations = self._get_activations(self._tfx)
self._y_pred = self._activations[-1]
l2_losses = self._get_l2_losses(lb) # type: list
self._loss = self._layers[-1].calculate(self._tfy, self._y_pred) + tf.reduce_sum(l2_losses)
self._metric_rs = [metric(self._tfy, self._y_pred) for metric in self._metrics]
self._init_train_step(sess)
            # `weight *= weight_scale` was a no-op (it only rebound a local name), so
            # scale the real variables in place; pipe placeholders are tf.constant and skipped
            if weight_scale != 1:
                sess.run([tf.assign(w, w * weight_scale)
                          for w in self._tf_weights if isinstance(w, tf.Variable)])
if tensorboard_verbose > 0:
log_dir = os.path.join("tbLogs", str(datetime.datetime.now())[:19].replace(":", "-"))
train_dir = os.path.join(log_dir, "train")
test_dir = os.path.join(log_dir, "test")
for _dir in (log_dir, train_dir, test_dir):
if not os.path.isdir(_dir):
os.makedirs(_dir)
test_summary_ops = []
with tf.name_scope("l2_loss"):
layer_names = [
self._get_tb_name(layer) for layer in self._layers
if not isinstance(layer, SubLayer) and not isinstance(layer, ConvPoolLayer)
]
for name, l2_loss in zip(layer_names, l2_losses):
tf.summary.scalar(name, l2_loss)
with tf.name_scope("GlobalSummaries"):
test_summary_ops.append(tf.summary.scalar("loss", self._loss))
for name, metric_rs in zip(self._metric_names, self._metric_rs):
test_summary_ops.append(tf.summary.scalar(name, metric_rs))
train_merge_op = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(train_dir, sess.graph)
train_writer.add_graph(sess.graph)
test_writer = tf.summary.FileWriter(test_dir)
test_merge_op = tf.summary.merge(test_summary_ops)
else:
train_writer = test_writer = train_merge_op = test_merge_op = None
args = (
x_train, y_train, x_test, y_test, show_loss,
self.verbose >= NNVerbose.METRICS_DETAIL,
tensorboard_verbose, train_repeat, sess, train_merge_op, test_merge_op,
train_writer, test_writer
)
ims = []
for counter in range(epoch):
if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration")
else:
sub_bar = None
self._batch_training(
x_train, y_train, batch_size, train_repeat,
self._loss, self._train_step, sub_bar, counter, *args)
self._handle_animation(
counter, x, y, ims, animation_params, *animation_properties,
img=self._draw_2d_network(**animation_params), name="Neural Network"
)
if (counter + 1) % record_period == 0:
if do_log:
self._append_log(x_train, y_train, None, "Train", show_loss)
self._append_log(x_test, y_test, None, "Test", show_loss)
if self.verbose >= NNVerbose.METRICS:
self._print_metric_logs("Train", show_loss)
self._print_metric_logs("Test", show_loss)
if self.verbose >= NNVerbose.EPOCH:
bar.update(counter // record_period + 1)
if img is not None:
cv2.waitKey(0)
cv2.destroyAllWindows()
self._handle_mp4(ims, animation_properties, "NN")
@NNTiming.timeit(level=2, prefix="[API] ")
def save(self, path=None, name=None, overwrite=True):
path = "Models" if path is None else path
name = "Cache" if name is None else name
folder = os.path.join(path, name)
if not os.path.exists(folder):
os.makedirs(folder)
_dir = os.path.join(folder, "Model")
if os.path.isfile(_dir):
if not overwrite:
_count = 1
_new_dir = _dir + "({})".format(_count)
while os.path.isfile(_new_dir):
_count += 1
_new_dir = _dir + "({})".format(_count)
_dir = _new_dir
else:
os.remove(_dir)
print()
print("=" * 60)
print("Saving Model to {}...".format(folder))
print("-" * 60)
with open(_dir + ".nn", "wb") as file:
# We don't need w_stds & b_inits when we load a model
_dic = {
"structures": {
"_lr": self._lr,
"_layer_names": self.layer_names,
"_layer_params": self._layer_params,
"_next_dimension": self._current_dimension
},
"params": {
"_logs": self._logs,
"_metric_names": self._metric_names,
"_optimizer": self._optimizer.name,
"layer_special_params": self.layer_special_params
}
}
pickle.dump(_dic, file)
saver = tf.train.Saver()
saver.save(self._sess, _dir)
graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Model.pb", False)
with tf.name_scope("OutputFlow"):
self.get_rs(self._tfx)
_output = ""
for op in self._sess.graph.get_operations()[::-1]:
if "OutputFlow" in op.name:
_output = op.name
break
with open(os.path.join(path, name, "IO.txt"), "w") as file:
file.write("\n".join([
"Input : Entry/Placeholder:0",
"Output : {}:0".format(_output)
]))
graph_io.write_graph(self._sess.graph, os.path.join(path, name), "Cache.pb", False)
freeze_graph.freeze_graph(
os.path.join(path, name, "Cache.pb"),
"", True, os.path.join(path, name, "Model"),
_output, "save/restore_all", "save/Const:0",
os.path.join(path, name, "Frozen.pb"), True, ""
)
os.remove(os.path.join(path, name, "Cache.pb"))
print("Done")
print("=" * 60)
@NNTiming.timeit(level=2, prefix="[API] ")
def load(self, path=None, verbose=2):
if path is None:
path = os.path.join("Models", "Cache", "Model")
else:
path = os.path.join(path, "Model")
self.initialize()
try:
with open(path + ".nn", "rb") as file:
_dic = pickle.load(file)
for key, value in _dic["structures"].items():
setattr(self, key, value)
self.build()
for key, value in _dic["params"].items():
setattr(self, key, value)
self._init_optimizer()
for i in range(len(self._metric_names) - 1, -1, -1):
name = self._metric_names[i]
if name not in self._available_metrics:
self._metric_names.pop(i)
else:
self._metrics.insert(0, self._available_metrics[name])
except Exception as err:
raise BuildNetworkError("Failed to load Network ({}), structure initialized".format(err))
self._loaded = True
saver = tf.train.Saver()
saver.restore(self._sess, path)
self._init_layers()
self._init_structure(verbose)
print()
print("=" * 30)
print("Model restored")
print("=" * 30)
@NNTiming.timeit(level=4, prefix="[API] ")
def predict(self, x, get_raw_results=False, transfer_x=True):
x = np.asarray(x, dtype=np.float32)
if transfer_x:
x = NNDist._transfer_x(x)
y_pred = self._get_prediction(x)
return y_pred if get_raw_results else np.argmax(y_pred, axis=1)
@NNTiming.timeit()
def evaluate(self, x, y, metrics=None, tar=0, prefix="Acc", **kwargs):
logs, y_pred = [], self._get_prediction(NNDist._transfer_x(x))
for i, metric_rs in enumerate(self._metric_rs):
logs.append(self._sess.run(metric_rs, {
self._tfy: y, self._y_pred: y_pred
}))
if isinstance(tar, int):
print(prefix + ": {:12.8}".format(logs[tar]))
return logs
def draw_results(self):
metrics_log, loss_log = {}, {}
for key, value in sorted(self._logs.items()):
metrics_log[key], loss_log[key] = value[:-1], value[-1]
for i, name in enumerate(sorted(self._metric_names)):
plt.figure()
plt.title("Metric Type: {}".format(name))
for key, log in sorted(metrics_log.items()):
xs = np.arange(len(log[i])) + 1
plt.plot(xs, log[i], label="Data Type: {}".format(key))
plt.legend(loc=4)
plt.show()
plt.close()
plt.figure()
plt.title("Cost")
for key, loss in sorted(loss_log.items()):
xs = np.arange(len(loss)) + 1
plt.plot(xs, loss, label="Data Type: {}".format(key))
plt.legend()
plt.show()
def draw_conv_weights(self):
with self._sess.as_default():
for i, (name, weight) in enumerate(zip(self.layer_names, self._tf_weights)):
weight = weight.eval()
if len(weight.shape) != 4:
continue
for j, _w in enumerate(weight.transpose(2, 3, 0, 1)):
VisUtil.show_batch_img(_w, "{} {} filter {}".format(name, i + 1, j + 1))
def draw_conv_series(self, x, shape=None):
x = np.asarray(x)
for xx in x:
VisUtil.show_img(VisUtil.trans_img(xx, shape), "Original")
for i, (layer, ac) in enumerate(zip(
self._layers, self._get_acts(np.array([xx.transpose(1, 2, 0)], dtype=np.float32)))):
if len(ac.shape) == 4:
VisUtil.show_batch_img(ac[0].transpose(2, 0, 1), "Layer {} ({})".format(i + 1, layer.name))
else:
ac = ac[0]
length = sqrt(np.prod(ac.shape))
if length < 10:
continue
(height, width) = xx.shape[1:] if shape is None else shape[1:]
sqrt_shape = sqrt(height * width)
oh, ow = int(length * height / sqrt_shape), int(length * width / sqrt_shape)
VisUtil.show_img(ac[:oh * ow].reshape(oh, ow), "Layer {} ({})".format(i + 1, layer.name))
@staticmethod
def fuck_pycharm_warning():
print(Axes3D.acorr)
class NNFrozen(NNBase):
NNTiming = Timing()
def __init__(self):
super(NNFrozen, self).__init__()
self._sess = tf.Session()
self._entry = self._output = None
@NNTiming.timeit(level=4, prefix="[API] ")
def load(self, path=None, pb="Frozen.pb"):
if path is None:
path = os.path.join("Models", "Cache")
try:
with open(os.path.join(path, "Model.nn"), "rb") as file:
_dic = pickle.load(file)
for key, value in _dic["structures"].items():
setattr(self, key, value)
for name, param in zip(self._layer_names, self._layer_params):
self.add(name, *param)
for key, value in _dic["params"].items():
setattr(self, key, value)
except Exception as err:
raise BuildNetworkError("Failed to load Network ({}), structure initialized".format(err))
with open(os.path.join(path, "IO.txt"), "r") as file:
self._entry = file.readline().strip()[9:]
self._output = file.readline().strip()[9:]
Util.load_frozen_graph(os.path.join(path, pb), True, self._entry, self._output)
print()
print("=" * 30)
print("Model restored")
print("=" * 30)
@NNTiming.timeit(level=2, prefix="[API] ")
def predict(self, x, get_raw_results=False, **kwargs):
x = NNDist._transfer_x(np.asarray(x))
rs = []
batch_size = floor(1e6 / np.prod(x.shape[1:]))
epoch = int(ceil(len(x) / batch_size))
output = self._sess.graph.get_tensor_by_name(self._output)
bar = ProgressBar(max_value=epoch, name="Predict")
for i in range(epoch):
if i == epoch - 1:
rs.append(self._sess.run(output, {
self._entry: x[i * batch_size:]
}))
else:
rs.append(self._sess.run(output, {
self._entry: x[i * batch_size:(i + 1) * batch_size]
}))
bar.update()
y_pred = np.vstack(rs).astype(np.float32)
return y_pred if get_raw_results else np.argmax(y_pred, axis=1)
@NNTiming.timeit(level=1, prefix="[API] ")
def evaluate(self, x, y, metrics=None, tar=None, prefix="Acc", **kwargs):
y_pred = self.predict(x)
print("Acc: {:8.6} %".format(100 * np.sum(np.argmax(y, axis=1) == np.argmax(y_pred, axis=1)) / len(y)))
class NNPipe:
NNTiming = Timing()
def __init__(self, num):
self._nn_lst = [NNBase() for _ in range(num)]
for _nn in self._nn_lst:
_nn.verbose = 0
self._initialized = [False] * num
self.is_sub_layer = False
self.parent = None
def __getitem__(self, item):
if isinstance(item, str):
return getattr(self, "_" + item)
return
def __str__(self):
return "NNPipe"
__repr__ = __str__
@property
def name(self):
return "NNPipe"
@property
def n_filters(self):
return sum([_nn["layers"][-1].n_filters for _nn in self._nn_lst])
@property
def out_h(self):
return self._nn_lst[0]["layers"][-1].out_h
@property
def out_w(self):
return self._nn_lst[0]["layers"][-1].out_w
@property
def shape(self):
# TODO: modify shape[0] to be the correct one
return (self.n_filters, self.out_h, self.out_w), (self.n_filters, self.out_h, self.out_w)
@property
def info(self):
return "Pipe ({:^3d})".format(len(self._nn_lst)) + " " * 65 + "- out: {}".format(
self.shape[1])
@property
def initialized(self):
return self._initialized
@NNTiming.timeit(level=4, prefix="[API] ")
def preview(self):
print("=" * 90)
print("Pipe Structure")
for i, nn in enumerate(self._nn_lst):
print("-" * 60 + "\n" + str(i) + "\n" + "-" * 60)
nn.preview()
@NNTiming.timeit(level=4, prefix="[API] ")
def add(self, idx, layer, shape, *args, **kwargs):
if shape is None:
self._nn_lst[idx].add(layer, *args, **kwargs)
else:
self._nn_lst[idx].add(layer, shape, *args, **kwargs)
self._initialized[idx] = True
@NNTiming.timeit(level=1, prefix="[API] ")
def get_rs(self, x, predict):
return tf.concat([nn.get_rs(x, predict=predict, pipe=True) for nn in self._nn_lst], 3)
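# Usage sketch (kept as a comment so importing this module stays side-effect
# free). Assuming the imports used above (numpy as np, tensorflow, the layer
# factory, ...) are available and x / y are a float32 feature matrix and a
# one-hot label matrix, a small classifier could be built along these lines:
#
#     nn = NNDist()
#     nn.add("ReLU", (x.shape[1], 24))      # (input_dim, hidden_units)
#     nn.add("ReLU", (24,))
#     nn.add("CrossEntropy", (y.shape[1],))
#     nn.fit(x, y, epoch=20, verbose=2)
#     nn.evaluate(x, y)
#     nn.save()                             # -> Models/Cache/{Model, Frozen.pb, IO.txt}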
|
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sysroot controller tests."""
from __future__ import print_function
import datetime
import os
import sys
from chromite.api import api_config
from chromite.api import controller
from chromite.api.controller import sysroot as sysroot_controller
from chromite.api.gen.chromite.api import sysroot_pb2
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
from chromite.service import sysroot as sysroot_service
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class CreateTest(cros_test_lib.MockTestCase, api_config.ApiConfigMixin):
"""Create function tests."""
def _InputProto(self, build_target=None, profile=None, replace=False,
current=False):
"""Helper to build and input proto instance."""
proto = sysroot_pb2.SysrootCreateRequest()
if build_target:
proto.build_target.name = build_target
if profile:
proto.profile.name = profile
if replace:
proto.flags.replace = replace
if current:
proto.flags.chroot_current = current
return proto
def _OutputProto(self):
"""Helper to build output proto instance."""
return sysroot_pb2.SysrootCreateResponse()
def testValidateOnly(self):
"""Sanity check that a validate only call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'Create')
board = 'board'
profile = None
force = False
upgrade_chroot = True
in_proto = self._InputProto(build_target=board, profile=profile,
replace=force, current=not upgrade_chroot)
sysroot_controller.Create(in_proto, self._OutputProto(),
self.validate_only_config)
patch.assert_not_called()
def testMockCall(self):
"""Sanity check that a mock call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'Create')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.Create(request, response, self.mock_call_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_SUCCESS, rc)
def testMockError(self):
"""Sanity check that a mock error does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'Create')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.Create(request, response, self.mock_error_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_UNRECOVERABLE, rc)
def testArgumentValidation(self):
"""Test the input argument validation."""
# Error when no name provided.
in_proto = self._InputProto()
out_proto = self._OutputProto()
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.Create(in_proto, out_proto, self.api_config)
# Valid when board passed.
result = sysroot_lib.Sysroot('/sysroot/path')
patch = self.PatchObject(sysroot_service, 'Create', return_value=result)
in_proto = self._InputProto('board')
out_proto = self._OutputProto()
sysroot_controller.Create(in_proto, out_proto, self.api_config)
patch.assert_called_once()
def testArgumentHandling(self):
"""Test the arguments get processed and passed correctly."""
sysroot_path = '/sysroot/path'
sysroot = sysroot_lib.Sysroot(sysroot_path)
create_patch = self.PatchObject(sysroot_service, 'Create',
return_value=sysroot)
rc_patch = self.PatchObject(sysroot_service, 'SetupBoardRunConfig')
# Default values.
board = 'board'
profile = None
force = False
upgrade_chroot = True
in_proto = self._InputProto(build_target=board, profile=profile,
replace=force, current=not upgrade_chroot)
out_proto = self._OutputProto()
sysroot_controller.Create(in_proto, out_proto, self.api_config)
# Default value checks.
rc_patch.assert_called_with(force=force, upgrade_chroot=upgrade_chroot)
self.assertEqual(board, out_proto.sysroot.build_target.name)
self.assertEqual(sysroot_path, out_proto.sysroot.path)
# Not default values.
create_patch.reset_mock()
board = 'board'
profile = 'profile'
force = True
upgrade_chroot = False
in_proto = self._InputProto(build_target=board, profile=profile,
replace=force, current=not upgrade_chroot)
out_proto = self._OutputProto()
sysroot_controller.Create(in_proto, out_proto, self.api_config)
# Not default value checks.
rc_patch.assert_called_with(force=force, upgrade_chroot=upgrade_chroot)
self.assertEqual(board, out_proto.sysroot.build_target.name)
self.assertEqual(sysroot_path, out_proto.sysroot.path)
class CreateSimpleChromeSysrootTest(cros_test_lib.MockTempDirTestCase,
api_config.ApiConfigMixin):
"""CreateSimpleChromeSysroot function tests."""
def _InputProto(self, build_target=None, use_flags=None):
"""Helper to build and input proto instance."""
proto = sysroot_pb2.CreateSimpleChromeSysrootRequest()
if build_target:
proto.build_target.name = build_target
if use_flags:
proto.use_flags = use_flags
return proto
def _OutputProto(self):
"""Helper to build output proto instance."""
return sysroot_pb2.CreateSimpleChromeSysrootResponse()
def testValidateOnly(self):
"""Sanity check that a validate only call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'CreateSimpleChromeSysroot')
board = 'board'
in_proto = self._InputProto(build_target=board, use_flags=[])
sysroot_controller.CreateSimpleChromeSysroot(in_proto, self._OutputProto(),
self.validate_only_config)
patch.assert_not_called()
def testMockCall(self):
"""Sanity check that a mock call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'CreateSimpleChromeSysroot')
board = 'board'
in_proto = self._InputProto(build_target=board, use_flags=[])
rc = sysroot_controller.CreateSimpleChromeSysroot(in_proto,
self._OutputProto(),
self.mock_call_config)
self.assertEqual(controller.RETURN_CODE_SUCCESS, rc)
patch.assert_not_called()
def testArgumentValidation(self):
"""Test the input argument validation."""
# Error when no build target provided.
in_proto = self._InputProto()
out_proto = self._OutputProto()
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.CreateSimpleChromeSysroot(in_proto, out_proto,
self.api_config)
# Valid when board is specified.
patch = self.PatchObject(sysroot_service, 'CreateSimpleChromeSysroot',
return_value='/path/to/sysroot/tar.bz')
in_proto = self._InputProto(build_target='board')
out_proto = self._OutputProto()
sysroot_controller.CreateSimpleChromeSysroot(in_proto, out_proto,
self.api_config)
patch.assert_called_once()
class GenerateArchiveTest(cros_test_lib.MockTempDirTestCase,
api_config.ApiConfigMixin):
"""GenerateArchive function tests."""
def setUp(self):
self.chroot_path = '/path/to/chroot'
self.board = 'board'
def _InputProto(self, build_target=None, chroot_path=None, pkg_list=None):
"""Helper to build and input proto instance."""
# pkg_list will be a list of category/package strings such as
# ['virtual/target-fuzzers'].
if pkg_list:
package_list = []
for pkg in pkg_list:
pkg_string_parts = pkg.split('/')
package_info = common_pb2.PackageInfo(
category=pkg_string_parts[0],
package_name=pkg_string_parts[1])
package_list.append(package_info)
else:
package_list = []
return sysroot_pb2.SysrootGenerateArchiveRequest(
build_target={'name': build_target},
chroot={'path': chroot_path},
packages=package_list)
def _OutputProto(self):
"""Helper to build output proto instance."""
return sysroot_pb2.SysrootGenerateArchiveResponse()
def testValidateOnly(self):
"""Sanity check that a validate only call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'GenerateArchive')
in_proto = self._InputProto(build_target=self.board,
chroot_path=self.chroot_path,
pkg_list=['virtual/target-fuzzers'])
sysroot_controller.GenerateArchive(in_proto, self._OutputProto(),
self.validate_only_config)
patch.assert_not_called()
def testMockCall(self):
"""Sanity check that a mock call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'GenerateArchive')
in_proto = self._InputProto(build_target=self.board,
chroot_path=self.chroot_path,
pkg_list=['virtual/target-fuzzers'])
sysroot_controller.GenerateArchive(in_proto,
self._OutputProto(),
self.mock_call_config)
patch.assert_not_called()
def testArgumentValidation(self):
"""Test the input argument validation."""
# Error when no build target provided.
in_proto = self._InputProto()
out_proto = self._OutputProto()
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.GenerateArchive(in_proto, out_proto, self.api_config)
# Error when packages is not specified.
in_proto = self._InputProto(build_target='board',
chroot_path=self.chroot_path)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.GenerateArchive(in_proto, out_proto, self.api_config)
# Valid when board, chroot path, and package are specified.
patch = self.PatchObject(sysroot_service, 'GenerateArchive',
return_value='/path/to/sysroot/tar.bz')
in_proto = self._InputProto(build_target='board',
chroot_path=self.chroot_path,
pkg_list=['virtual/target-fuzzers'])
out_proto = self._OutputProto()
sysroot_controller.GenerateArchive(in_proto, out_proto, self.api_config)
patch.assert_called_once()
class InstallToolchainTest(cros_test_lib.MockTempDirTestCase,
api_config.ApiConfigMixin):
"""Install toolchain function tests."""
def setUp(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
# Avoid running the portageq command.
self.PatchObject(sysroot_controller, '_LogBinhost')
self.board = 'board'
self.sysroot = os.path.join(self.tempdir, 'board')
self.invalid_sysroot = os.path.join(self.tempdir, 'invalid', 'sysroot')
osutils.SafeMakedirs(self.sysroot)
def _InputProto(self, build_target=None, sysroot_path=None,
compile_source=False):
"""Helper to build an input proto instance."""
proto = sysroot_pb2.InstallToolchainRequest()
if build_target:
proto.sysroot.build_target.name = build_target
if sysroot_path:
proto.sysroot.path = sysroot_path
if compile_source:
proto.flags.compile_source = compile_source
return proto
def _OutputProto(self):
"""Helper to build output proto instance."""
return sysroot_pb2.InstallToolchainResponse()
def testValidateOnly(self):
"""Sanity check that a validate only call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'InstallToolchain')
in_proto = self._InputProto(build_target=self.board,
sysroot_path=self.sysroot)
sysroot_controller.InstallToolchain(in_proto, self._OutputProto(),
self.validate_only_config)
patch.assert_not_called()
def testMockCall(self):
"""Sanity check that a mock call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'InstallToolchain')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.InstallToolchain(request, response,
self.mock_call_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_SUCCESS, rc)
def testMockError(self):
"""Sanity check that a mock error does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'InstallToolchain')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.InstallToolchain(request, response,
self.mock_error_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE, rc)
self.assertTrue(response.failed_packages)
def testArgumentValidation(self):
"""Test the argument validation."""
# Test errors on missing inputs.
out_proto = self._OutputProto()
# Both missing.
in_proto = self._InputProto()
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallToolchain(in_proto, out_proto, self.api_config)
# Sysroot path missing.
in_proto = self._InputProto(build_target=self.board)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallToolchain(in_proto, out_proto, self.api_config)
# Build target name missing.
in_proto = self._InputProto(sysroot_path=self.sysroot)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallToolchain(in_proto, out_proto, self.api_config)
# Both provided, but invalid sysroot path.
in_proto = self._InputProto(build_target=self.board,
sysroot_path=self.invalid_sysroot)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallToolchain(in_proto, out_proto, self.api_config)
def testSuccessOutputHandling(self):
"""Test the output is processed and recorded correctly."""
self.PatchObject(sysroot_service, 'InstallToolchain')
out_proto = self._OutputProto()
in_proto = self._InputProto(build_target=self.board,
sysroot_path=self.sysroot)
rc = sysroot_controller.InstallToolchain(in_proto, out_proto,
self.api_config)
self.assertFalse(rc)
self.assertFalse(out_proto.failed_packages)
def testErrorOutputHandling(self):
"""Test the error output is processed and recorded correctly."""
out_proto = self._OutputProto()
in_proto = self._InputProto(build_target=self.board,
sysroot_path=self.sysroot)
err_pkgs = ['cat/pkg', 'cat2/pkg2']
err_cpvs = [portage_util.SplitCPV(pkg, strict=False) for pkg in err_pkgs]
expected = [('cat', 'pkg'), ('cat2', 'pkg2')]
err = sysroot_lib.ToolchainInstallError('Error',
cros_build_lib.CommandResult(),
tc_info=err_cpvs)
self.PatchObject(sysroot_service, 'InstallToolchain', side_effect=err)
rc = sysroot_controller.InstallToolchain(in_proto, out_proto,
self.api_config)
self.assertEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE, rc)
self.assertTrue(out_proto.failed_packages)
for package in out_proto.failed_packages:
cat_pkg = (package.category, package.package_name)
self.assertIn(cat_pkg, expected)
class InstallPackagesTest(cros_test_lib.MockTempDirTestCase,
api_config.ApiConfigMixin):
"""InstallPackages tests."""
def setUp(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
# Avoid running the portageq command.
self.PatchObject(sysroot_controller, '_LogBinhost')
self.build_target = 'board'
self.sysroot = os.path.join(self.tempdir, 'build', 'board')
osutils.SafeMakedirs(self.sysroot)
# Set up goma directories.
self.goma_dir = os.path.join(self.tempdir, 'goma_dir')
osutils.SafeMakedirs(self.goma_dir)
self.goma_out_dir = os.path.join(self.tempdir, 'goma_out_dir')
osutils.SafeMakedirs(self.goma_out_dir)
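    # The goma log handling reads GLOG_log_dir to locate compiler_proxy logs, so
    # point it at the temporary goma directory created above.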
os.environ['GLOG_log_dir'] = self.goma_dir
def _InputProto(self, build_target=None, sysroot_path=None,
build_source=False, goma_dir=None, goma_log_dir=None,
goma_stats_file=None, goma_counterz_file=None):
"""Helper to build an input proto instance."""
instance = sysroot_pb2.InstallPackagesRequest()
if build_target:
instance.sysroot.build_target.name = build_target
if sysroot_path:
instance.sysroot.path = sysroot_path
if build_source:
instance.flags.build_source = build_source
if goma_dir:
instance.goma_config.goma_dir = goma_dir
if goma_log_dir:
instance.goma_config.log_dir.dir = goma_log_dir
if goma_stats_file:
instance.goma_config.stats_file = goma_stats_file
if goma_counterz_file:
instance.goma_config.counterz_file = goma_counterz_file
return instance
def _OutputProto(self):
"""Helper to build an empty output proto instance."""
return sysroot_pb2.InstallPackagesResponse()
def _CreateGomaLogFile(self, goma_log_dir, name, timestamp):
"""Creates a log file for testing.
Args:
goma_log_dir (str): Directory where the file will be created.
name (str): Log file 'base' name that is combined with the timestamp.
timestamp (datetime): timestamp that is written to the file.
"""
path = os.path.join(
goma_log_dir,
'%s.host.log.INFO.%s' % (name, timestamp.strftime('%Y%m%d-%H%M%S.%f')))
osutils.WriteFile(
path,
timestamp.strftime('Goma log file created at: %Y/%m/%d %H:%M:%S'))
def testValidateOnly(self):
"""Sanity check that a validate only call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'BuildPackages')
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot)
sysroot_controller.InstallPackages(in_proto, self._OutputProto(),
self.validate_only_config)
patch.assert_not_called()
def testMockCall(self):
"""Sanity check that a mock call does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'BuildPackages')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.InstallPackages(request, response,
self.mock_call_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_SUCCESS, rc)
def testMockError(self):
"""Sanity check that a mock error does not execute any logic."""
patch = self.PatchObject(sysroot_service, 'BuildPackages')
request = self._InputProto()
response = self._OutputProto()
rc = sysroot_controller.InstallPackages(request, response,
self.mock_error_config)
patch.assert_not_called()
self.assertEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE, rc)
self.assertTrue(response.failed_packages)
def testArgumentValidationAllMissing(self):
"""Test missing all arguments."""
out_proto = self._OutputProto()
in_proto = self._InputProto()
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallPackages(in_proto, out_proto, self.api_config)
def testArgumentValidationNoSysroot(self):
"""Test missing sysroot path."""
out_proto = self._OutputProto()
in_proto = self._InputProto(build_target=self.build_target)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallPackages(in_proto, out_proto, self.api_config)
def testArgumentValidationNoBuildTarget(self):
"""Test missing build target name."""
out_proto = self._OutputProto()
in_proto = self._InputProto(sysroot_path=self.sysroot)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallPackages(in_proto, out_proto, self.api_config)
def testArgumentValidationInvalidSysroot(self):
"""Test sysroot that hasn't had the toolchain installed."""
out_proto = self._OutputProto()
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot)
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=False)
with self.assertRaises(cros_build_lib.DieSystemExit):
sysroot_controller.InstallPackages(in_proto, out_proto, self.api_config)
def testSuccessOutputHandling(self):
"""Test successful call output handling."""
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot)
out_proto = self._OutputProto()
self.PatchObject(sysroot_service, 'BuildPackages')
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
self.assertFalse(rc)
self.assertFalse(out_proto.failed_packages)
def testSuccessWithGomaLogs(self):
"""Test successful call with goma."""
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy',
datetime.datetime(2018, 9, 21, 12, 0, 0))
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy-subproc',
datetime.datetime(2018, 9, 21, 12, 1, 0))
self._CreateGomaLogFile(self.goma_dir, 'gomacc',
datetime.datetime(2018, 9, 21, 12, 2, 0))
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot,
goma_dir=self.goma_dir,
goma_log_dir=self.goma_out_dir)
out_proto = self._OutputProto()
self.PatchObject(sysroot_service, 'BuildPackages')
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
self.assertFalse(rc)
self.assertFalse(out_proto.failed_packages)
self.assertCountEqual(out_proto.goma_artifacts.log_files, [
'compiler_proxy-subproc.host.log.INFO.20180921-120100.000000.gz',
'compiler_proxy.host.log.INFO.20180921-120000.000000.gz',
'gomacc.host.log.INFO.20180921-120200.000000.tar.gz'])
def testSuccessWithGomaLogsAndStatsCounterzFiles(self):
"""Test successful call with goma including stats and counterz files."""
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy',
datetime.datetime(2018, 9, 21, 12, 0, 0))
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy-subproc',
datetime.datetime(2018, 9, 21, 12, 1, 0))
self._CreateGomaLogFile(self.goma_dir, 'gomacc',
datetime.datetime(2018, 9, 21, 12, 2, 0))
# Create stats and counterz files.
osutils.WriteFile(os.path.join(self.goma_dir, 'stats.binaryproto'),
'File: stats.binaryproto')
osutils.WriteFile(os.path.join(self.goma_dir, 'counterz.binaryproto'),
'File: counterz.binaryproto')
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot,
goma_dir=self.goma_dir,
goma_log_dir=self.goma_out_dir,
goma_stats_file='stats.binaryproto',
goma_counterz_file='counterz.binaryproto')
out_proto = self._OutputProto()
self.PatchObject(sysroot_service, 'BuildPackages')
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
self.assertFalse(rc)
self.assertFalse(out_proto.failed_packages)
self.assertCountEqual(out_proto.goma_artifacts.log_files, [
'compiler_proxy-subproc.host.log.INFO.20180921-120100.000000.gz',
'compiler_proxy.host.log.INFO.20180921-120000.000000.gz',
'gomacc.host.log.INFO.20180921-120200.000000.tar.gz'])
# Verify that the output dir has 5 files -- since there should be 3 log
# files, the stats file, and the counterz file.
output_files = os.listdir(self.goma_out_dir)
self.assertCountEqual(output_files, [
'stats.binaryproto',
'counterz.binaryproto',
'compiler_proxy-subproc.host.log.INFO.20180921-120100.000000.gz',
'compiler_proxy.host.log.INFO.20180921-120000.000000.gz',
'gomacc.host.log.INFO.20180921-120200.000000.tar.gz'])
self.assertEqual(out_proto.goma_artifacts.counterz_file,
'counterz.binaryproto')
self.assertEqual(out_proto.goma_artifacts.stats_file,
'stats.binaryproto')
def testFailureMissingGomaStatsCounterzFiles(self):
"""Test successful call with goma including stats and counterz files."""
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy',
datetime.datetime(2018, 9, 21, 12, 0, 0))
self._CreateGomaLogFile(self.goma_dir, 'compiler_proxy-subproc',
datetime.datetime(2018, 9, 21, 12, 1, 0))
self._CreateGomaLogFile(self.goma_dir, 'gomacc',
datetime.datetime(2018, 9, 21, 12, 2, 0))
# Note that stats and counterz files are not created, but are specified in
# the proto below.
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot,
goma_dir=self.goma_dir,
goma_log_dir=self.goma_out_dir,
goma_stats_file='stats.binaryproto',
goma_counterz_file='counterz.binaryproto')
out_proto = self._OutputProto()
self.PatchObject(sysroot_service, 'BuildPackages')
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
self.assertFalse(rc)
self.assertFalse(out_proto.failed_packages)
self.assertCountEqual(out_proto.goma_artifacts.log_files, [
'compiler_proxy-subproc.host.log.INFO.20180921-120100.000000.gz',
'compiler_proxy.host.log.INFO.20180921-120000.000000.gz',
'gomacc.host.log.INFO.20180921-120200.000000.tar.gz'])
self.assertFalse(out_proto.goma_artifacts.counterz_file)
self.assertFalse(out_proto.goma_artifacts.stats_file)
def testFailureOutputHandling(self):
"""Test failed package handling."""
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot)
out_proto = self._OutputProto()
# Failed package info and expected list for verification.
err_pkgs = ['cat/pkg', 'cat2/pkg2']
err_cpvs = [portage_util.SplitCPV(cpv, strict=False) for cpv in err_pkgs]
expected = [('cat', 'pkg'), ('cat2', 'pkg2')]
# Force error to be raised with the packages.
error = sysroot_lib.PackageInstallError('Error',
cros_build_lib.CommandResult(),
packages=err_cpvs)
self.PatchObject(sysroot_service, 'BuildPackages', side_effect=error)
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
# This needs to return 2 to indicate the available error response.
self.assertEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE, rc)
for package in out_proto.failed_packages:
cat_pkg = (package.category, package.package_name)
self.assertIn(cat_pkg, expected)
def testNoPackageFailureOutputHandling(self):
"""Test failure handling without packages to report."""
# Prevent argument validation error.
self.PatchObject(sysroot_lib.Sysroot, 'IsToolchainInstalled',
return_value=True)
in_proto = self._InputProto(build_target=self.build_target,
sysroot_path=self.sysroot)
out_proto = self._OutputProto()
# Force error to be raised with no packages.
error = sysroot_lib.PackageInstallError('Error',
cros_build_lib.CommandResult(),
packages=[])
self.PatchObject(sysroot_service, 'BuildPackages', side_effect=error)
rc = sysroot_controller.InstallPackages(in_proto, out_proto,
self.api_config)
# All we really care about is it's not 0 or 2 (response available), so
# test for that rather than a specific return code.
self.assertTrue(rc)
self.assertNotEqual(controller.RETURN_CODE_UNSUCCESSFUL_RESPONSE_AVAILABLE,
rc)
|
|
from PIL import Image
import cv2
import zbar
import numpy as np
np.set_printoptions(precision=2)
from bottle import *
BaseRequest.MEMFILE_MAX = 1e8
import sys
from sys import argv
sys.path.append('/usr/local/lib/python2.7/site-packages')
import glob
import io
import json
import logging
import os
import pickle
import time
import urllib
import openfaceUtils
import config
logger = logging.getLogger(__name__)
try:
pickleUrl = os.getenv('OCV_DATA_PICKLE_URL')
if pickleUrl:
logger.info('Pickle url found, proceeding with download...')
logger.info(pickleUrl)
urllib.urlretrieve(pickleUrl, config.pickleLocation)
jsonUrl = os.getenv('OCV_DATA_JSON_URL')
if jsonUrl:
logger.info('Data JSON url found, proceeding with download...')
logger.info(jsonUrl)
urllib.urlretrieve(jsonUrl, config.pickleJsonLocation)
except Exception as e:
logger.error("Unable to load pickle from url")
logger.exception(e)
# configure openface model
with open(config.pickleLocation) as f:
start = time.time()
reps = pickle.load(f)
logger.info("Loaded stored pickle, took {}".format(time.time() - start))
data_dict = {}
try:
with open(config.pickleJsonLocation) as f:
data = json.load(f)
if 'profiles' in data:
for d in data['profiles']:
if 'upi' in d:
data_dict[d['upi']] = d
else:
data_dict = data
except Exception as e:
logger.error("Unable to load data.json: ", e)
# start endpoints
@get('/health')
def healthCheck():
logger.info('Executing GET')
results = {}
results["status"] = "ok"
response.content_type = 'application/json'
return json.dumps(results)
@post('/lpr')
def lpr():
logger.info('Executing POST')
if request.files.get('image'):
image_bytes = request.files.get('image').file.read()
else:
image_bytes = request.body.read()
if len(image_bytes) <= 0:
return {'error': 'Unable to decode posted image!'}
results = config.alpr.recognize_array(image_bytes)
response.content_type = 'application/json'
return json.dumps(results)
@post('/qrr')
def qrr():
logger.info('Executing POST')
if request.files.get('image'):
image_bytes = request.files.get('image').file.read()
else:
image_bytes = request.body.read()
if len(image_bytes) <= 0:
return {'error': 'Unable to decode posted image!'}
pil = Image.open(io.BytesIO(image_bytes)).convert('L')
width, height = pil.size
# wrap image data
raw = pil.tobytes()
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
config.scanner.scan(image)
results = {}
for symbol in image:
#print dir(symbol)
results['type'] = str(symbol.type)
results['data'] = symbol.data
results['location'] = symbol.location
results['quality'] = symbol.quality
results['count'] = symbol.count
#results['components'] = symbol.components
response.content_type = 'application/json'
return json.dumps(results)
@post('/ofr')
def ofr():
logger.info('Executing POST')
if request.files.get('image'):
image_bytes = request.files.get('image').file.read()
else:
image_bytes = request.body.read()
img_array = np.asarray(bytearray(image_bytes), dtype=np.uint8)
print("recieved image of size {}".format(len(img_array)))
image_data = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
if image_data is None:
print("Unable to decode posted image!")
response.status = 500
return {'error': 'Unable to decode posted image!'}
bboxes = []
try:
start = time.time()
bboxes = openfaceUtils.getBoundingBoxes(image_data)
print("Got face representation in {} seconds".format(time.time() - start))
except Exception as e:
print("Error: {}".format(e))
response.status = 500
return {'error': str(e)}
ids_to_compare = request.params.get('ids_to_compare', reps.keys())
results = []
for bb in bboxes:
position = bb['position']
rep = bb['rep']
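# Nearest-neighbour match by squared Euclidean distance between OpenFace
# embeddings. OpenFace reps are L2-normalized, so the squared distance between
# two embeddings is at most 4.0; initializing `best` to 4 therefore means
# "no match yet", and the confidence reported below is just a linear rescaling
# of that distance (1 - best/4), a convention of this service rather than an
# OpenFace-defined score.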
best = 4
bestUid = "unknown"
for i in ids_to_compare:
if type(reps[i]) is not list:
reps[i] = [reps[i]]
for r in reps[i]:
d = rep - r
dot = np.dot(d,d)
if dot < best:
best = dot
bestUid = i
results.append({"match": bestUid, "confidence": 1 - best/4, "data": data_dict.get(bestUid), "position": position})
resp = {
'results': results
}
response.content_type = 'application/json'
return json.dumps(resp)
@post('/odr')
def odr():
logger.info('Executing POST')
if request.files.get('image'):
image_bytes = request.files.get('image').file.read()
else:
image_bytes = request.body.read()
if len(image_bytes) <= 0:
return {'error': 'Unable to decode posted image!'}
img_array = np.asarray(bytearray(image_bytes), dtype=np.uint8)
print("recieved image of size {}".format(len(img_array)))
image_data = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
if image_data is None:
print("Unable to decode posted image!")
response.status = 500
return {'error': 'Unable to decode posted image!'}
results = {}
mxnetModel = request.forms.get('model', default='squeezenet_v1.1')
if mxnetModel == 'vgg19':
results = config.vgg19.predict_from_file(image_data)
else:
mxnetModel = 'squeezenet_v1.1'
results = config.squeezenet.predict_from_file(image_data)
results['model'] = mxnetModel
response.content_type = 'application/json'
return json.dumps(results)
@get('/faces')
def faces_site():
logger.info('Executing GET')
return static_file("site/faces.html", ".")
@post('/faces/generate')
def faces_generate():
logger.info('Executing POST')
openfaceUtils.generatePickle()
results = {"status": "ok"}
response.content_type = 'application/json'
return json.dumps(results)
@get('/faces/<uid>')
def faces_get(uid):
logger.info('Executing GET')
f = glob.glob("/root/data/images/{}/*".format(uid))
return static_file(f[0], '/')
# start server
port = int(os.environ.get('PORT', 8888))
if __name__ == "__main__":
run(host='0.0.0.0', port=port, debug=True, server='gunicorn', workers=4)
app = default_app()
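# Example requests (hypothetical file names; assumes the server is reachable
# on localhost at the default port 8888):
#   curl http://localhost:8888/health
#   curl -F image=@plate.jpg http://localhost:8888/lpr                      # licence plates
#   curl -F image=@code.png http://localhost:8888/qrr                       # QR / barcodes
#   curl -F image=@face.jpg http://localhost:8888/ofr                       # face recognition
#   curl -F image=@scene.jpg -F model=vgg19 http://localhost:8888/odr       # object recognition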
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Read images and perform augmentations for object detection."""
from __future__ import absolute_import, print_function
import random
import logging
import json
import numpy as np
from ..base import numeric_types
from .. import ndarray as nd
from .._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .image import RandomOrderAug, ColorJitterAug, LightingAug, ColorNormalizeAug
from .image import ResizeAug, ForceResizeAug, CastAug, HueJitterAug, RandomGrayAug
from .image import fixed_crop, ImageIter, Augmenter
class DetAugmenter(object):
"""Detection base augmenter"""
def __init__(self, **kwargs):
self._kwargs = kwargs
for k, v in self._kwargs.items():
if isinstance(v, nd.NDArray):
v = v.asnumpy()
if isinstance(v, np.ndarray):
v = v.tolist()
self._kwargs[k] = v
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, src, label):
"""Abstract implementation body"""
raise NotImplementedError("Must override implementation.")
class DetBorrowAug(DetAugmenter):
"""Borrow standard augmenter from image classification.
This is useful when you know the label will not be affected by the augmenter.
Parameters
----------
augmenter : mx.image.Augmenter
The borrowed standard augmenter which has no effect on label
"""
def __init__(self, augmenter):
if not isinstance(augmenter, Augmenter):
raise TypeError('Borrowing from invalid Augmenter')
super(DetBorrowAug, self).__init__(augmenter=augmenter.dumps())
self.augmenter = augmenter
def dumps(self):
"""Override the default one to avoid duplicate dump."""
return [self.__class__.__name__.lower(), self.augmenter.dumps()]
def __call__(self, src, label):
"""Augmenter implementation body"""
src = self.augmenter(src)
return (src, label)
class DetRandomSelectAug(DetAugmenter):
"""Randomly select one augmenter to apply, with chance to skip all.
Parameters
----------
aug_list : list of DetAugmenter
The random selection will be applied to one of the augmenters
skip_prob : float
The probability to skip all augmenters and return input directly
"""
def __init__(self, aug_list, skip_prob=0):
super(DetRandomSelectAug, self).__init__(skip_prob=skip_prob)
if not isinstance(aug_list, (list, tuple)):
aug_list = [aug_list]
for aug in aug_list:
if not isinstance(aug, DetAugmenter):
raise ValueError('Allow DetAugmenter in list only')
if not aug_list:
skip_prob = 1 # disabled
self.aug_list = aug_list
self.skip_prob = skip_prob
def dumps(self):
"""Override default."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.aug_list]]
def __call__(self, src, label):
"""Augmenter implementation body"""
if random.random() < self.skip_prob:
return (src, label)
else:
random.shuffle(self.aug_list)
return self.aug_list[0](src, label)
class DetHorizontalFlipAug(DetAugmenter):
"""Random horizontal flipping.
Parameters
----------
p : float
chance [0, 1] to flip
"""
def __init__(self, p):
super(DetHorizontalFlipAug, self).__init__(p=p)
self.p = p
def __call__(self, src, label):
"""Augmenter implementation"""
if random.random() < self.p:
src = nd.flip(src, axis=1)
self._flip_label(label)
return (src, label)
def _flip_label(self, label):
"""Helper function to flip label."""
tmp = 1.0 - label[:, 1]
label[:, 1] = 1.0 - label[:, 3]
label[:, 3] = tmp
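# Example: a normalized row [cls, xmin, ymin, xmax, ymax] = [0, 0.2, 0.1, 0.6, 0.5]
# becomes [0, 0.4, 0.1, 0.8, 0.5] after flipping: new xmin = 1 - old xmax and
# new xmax = 1 - old xmin, while the y coordinates are untouched.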
class DetRandomCropAug(DetAugmenter):
"""Random cropping with constraints
Parameters
----------
min_object_covered : float, default=0.1
The cropped area of the image must contain at least this fraction of
any bounding box supplied. The value of this parameter should be non-negative.
In the case of 0, the cropped area does not need to overlap any of the
bounding boxes supplied.
min_eject_coverage : float, default=0.3
The minimum coverage of cropped sample w.r.t its original size. With this
constraint, objects that have marginal area after crop will be discarded.
aspect_ratio_range : tuple of floats, default=(0.75, 1.33)
The cropped area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats, default=(0.05, 1.0)
The cropped area of the image must contain a fraction of the supplied
image within this range.
max_attempts : int, default=50
Number of attempts at generating a cropped/padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
"""
def __init__(self, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0), min_eject_coverage=0.3, max_attempts=50):
if not isinstance(aspect_ratio_range, (tuple, list)):
assert isinstance(aspect_ratio_range, numeric_types)
logging.info('Using fixed aspect ratio: %s in DetRandomCropAug',
str(aspect_ratio_range))
aspect_ratio_range = (aspect_ratio_range, aspect_ratio_range)
if not isinstance(area_range, (tuple, list)):
assert isinstance(area_range, numeric_types)
logging.info('Using fixed area range: %s in DetRandomCropAug', area_range)
area_range = (area_range, area_range)
super(DetRandomCropAug, self).__init__(min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
min_eject_coverage=min_eject_coverage,
max_attempts=max_attempts)
self.min_object_covered = min_object_covered
self.min_eject_coverage = min_eject_coverage
self.max_attempts = max_attempts
self.aspect_ratio_range = aspect_ratio_range
self.area_range = area_range
self.enabled = False
if (area_range[1] <= 0 or area_range[0] > area_range[1]):
logging.warn('Skip DetRandomCropAug due to invalid area_range: %s', area_range)
elif (aspect_ratio_range[0] > aspect_ratio_range[1] or aspect_ratio_range[0] <= 0):
logging.warn('Skip DetRandomCropAug due to invalid aspect_ratio_range: %s',
aspect_ratio_range)
else:
self.enabled = True
def __call__(self, src, label):
"""Augmenter implementation body"""
crop = self._random_crop_proposal(label, src.shape[0], src.shape[1])
if crop:
x, y, w, h, label = crop
src = fixed_crop(src, x, y, w, h, None)
return (src, label)
def _calculate_areas(self, label):
"""Calculate areas for multiple labels"""
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
return heights * widths
def _intersect(self, label, xmin, ymin, xmax, ymax):
"""Calculate intersect areas, normalized."""
left = np.maximum(label[:, 0], xmin)
right = np.minimum(label[:, 2], xmax)
top = np.maximum(label[:, 1], ymin)
bot = np.minimum(label[:, 3], ymax)
invalid = np.where(np.logical_or(left >= right, top >= bot))[0]
out = label.copy()
out[:, 0] = left
out[:, 1] = top
out[:, 2] = right
out[:, 3] = bot
out[invalid, :] = 0
return out
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
"""Check if constrains are satisfied"""
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas
coverages = coverages[np.where(coverages > 0)[0]]
if coverages.size > 0 and np.amin(coverages) > self.min_object_covered:
return True
def _update_labels(self, label, crop_box, height, width):
"""Convert labels according to crop box"""
xmin = float(crop_box[0]) / width
ymin = float(crop_box[1]) / height
w = float(crop_box[2]) / width
h = float(crop_box[3]) / height
out = label.copy()
out[:, (1, 3)] -= xmin
out[:, (2, 4)] -= ymin
out[:, (1, 3)] /= w
out[:, (2, 4)] /= h
out[:, 1:5] = np.maximum(0, out[:, 1:5])
out[:, 1:5] = np.minimum(1, out[:, 1:5])
coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
valid = np.logical_and(valid, coverage > self.min_eject_coverage)
valid = np.where(valid)[0]
if valid.size < 1:
return None
out = out[valid, :]
return out
def _random_crop_proposal(self, label, height, width):
"""Propose cropping areas"""
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(max_h * ratio) > width:
# find smallest max_h satisfying round(max_h * ratio) <= width
max_h = int((width + 0.4999999) / ratio)
if max_h > height:
max_h = height
if h > max_h:
h = max_h
if h < max_h:
# generate random h in range [h, max_h]
h = random.randint(h, max_h)
w = int(round(h * ratio))
assert w <= width
# trying to fix rounding problems
area = w * h
if area < min_area:
h += 1
w = int(round(h * ratio))
area = w * h
if area > max_area:
h -= 1
w = int(round(h * ratio))
area = w * h
if (area < min_area or area > max_area or w > width or h > height \
or w <= 0 or h <= 0):
continue
y = random.randint(0, max(0, height - h))
x = random.randint(0, max(0, width - w))
if self._check_satisfy_constraints(label, x, y, x + w, y + h, width, height):
new_label = self._update_labels(label, (x, y, w, h), height, width)
if new_label is not None:
return (x, y, w, h, new_label)
return ()
class DetRandomPadAug(DetAugmenter):
"""Random padding augmenter.
Parameters
----------
aspect_ratio_range : tuple of floats, default=(0.75, 1.33)
The padded area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats, default=(1.0, 3.0)
The padded area of the image must be larger than the original area
max_attempts : int, default=50
Number of attempts at generating a padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
pad_val: float or tuple of float, default=(128, 128, 128)
pixel value to be filled when padding is enabled.
"""
def __init__(self, aspect_ratio_range=(0.75, 1.33), area_range=(1.0, 3.0),
max_attempts=50, pad_val=(128, 128, 128)):
if not isinstance(pad_val, (list, tuple)):
assert isinstance(pad_val, numeric_types)
pad_val = (pad_val, pad_val, pad_val)  # broadcast the scalar fill value to all three channels
if not isinstance(aspect_ratio_range, (list, tuple)):
assert isinstance(aspect_ratio_range, numeric_types)
logging.info('Using fixed aspect ratio: %s in DetRandomPadAug',
str(aspect_ratio_range))
aspect_ratio_range = (aspect_ratio_range, aspect_ratio_range)
if not isinstance(area_range, (tuple, list)):
assert isinstance(area_range, numeric_types)
logging.info('Using fixed area range: %s in DetRandomPadAug', area_range)
area_range = (area_range, area_range)
super(DetRandomPadAug, self).__init__(aspect_ratio_range=aspect_ratio_range,
area_range=area_range, max_attempts=max_attempts,
pad_val=pad_val)
self.pad_val = pad_val
self.aspect_ratio_range = aspect_ratio_range
self.area_range = area_range
self.max_attempts = max_attempts
self.enabled = False
if (area_range[1] <= 1.0 or area_range[0] > area_range[1]):
logging.warn('Skip DetRandomPadAug due to invalid parameters: %s', area_range)
elif (aspect_ratio_range[0] <= 0 or aspect_ratio_range[0] > aspect_ratio_range[1]):
logging.warn('Skip DetRandomPadAug due to invalid aspect_ratio_range: %s',
aspect_ratio_range)
else:
self.enabled = True
def __call__(self, src, label):
"""Augmenter body"""
height, width, _ = src.shape
pad = self._random_pad_proposal(label, height, width)
if pad:
x, y, w, h, label = pad
src = copyMakeBorder(src, y, h-y-height, x, w-x-width, 16, values=self.pad_val)
return (src, label)
def _update_labels(self, label, pad_box, height, width):
"""Update label according to padding region"""
out = label.copy()
out[:, (1, 3)] = (out[:, (1, 3)] * width + pad_box[0]) / pad_box[2]
out[:, (2, 4)] = (out[:, (2, 4)] * height + pad_box[1]) / pad_box[3]
return out
def _random_pad_proposal(self, label, height, width):
"""Generate random padding region"""
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(h * ratio) < width:
h = int((width + 0.499999) / ratio)
if h < height:
h = height
if h > max_h:
h = max_h
if h < max_h:
h = random.randint(h, max_h)
w = int(round(h * ratio))
if (h - height) < 2 or (w - width) < 2:
continue # marginal padding is not helpful
y = random.randint(0, max(0, h - height))
x = random.randint(0, max(0, w - width))
new_label = self._update_labels(label, (x, y, w, h), height, width)
return (x, y, w, h, new_label)
return ()
def CreateMultiRandCropAugmenter(min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0), min_eject_coverage=0.3,
max_attempts=50, skip_prob=0):
"""Helper function to create multiple random crop augmenters.
Parameters
----------
min_object_covered : float or list of float, default=0.1
The cropped area of the image must contain at least this fraction of
any bounding box supplied. The value of this parameter should be non-negative.
In the case of 0, the cropped area does not need to overlap any of the
bounding boxes supplied.
min_eject_coverage : float or list of float, default=0.3
The minimum coverage of cropped sample w.r.t its original size. With this
constraint, objects that have marginal area after crop will be discarded.
aspect_ratio_range : tuple of floats or list of tuple of floats, default=(0.75, 1.33)
The cropped area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats or list of tuple of floats, default=(0.05, 1.0)
The cropped area of the image must contain a fraction of the supplied
image within this range.
max_attempts : int or list of int, default=50
Number of attempts at generating a cropped/padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
Examples
--------
>>> # An example of creating multiple random crop augmenters
>>> min_object_covered = [0.1, 0.3, 0.5, 0.7, 0.9] # use 5 augmenters
>>> aspect_ratio_range = (0.75, 1.33) # use same range for all augmenters
>>> area_range = [(0.1, 1.0), (0.2, 1.0), (0.2, 1.0), (0.3, 0.9), (0.5, 1.0)]
>>> min_eject_coverage = 0.3
>>> max_attempts = 50
>>> aug = mx.image.det.CreateMultiRandCropAugmenter(min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range, area_range=area_range,
min_eject_coverage=min_eject_coverage, max_attempts=max_attempts,
skip_prob=0)
>>> aug.dumps() # show some details
"""
def align_parameters(params):
"""Align parameters as pairs"""
out_params = []
num = 1
for p in params:
if not isinstance(p, list):
p = [p]
out_params.append(p)
num = max(num, len(p))
# align for each param
for k, p in enumerate(out_params):
if len(p) != num:
assert len(p) == 1
out_params[k] = p * num
return out_params
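# e.g. align_parameters([0.1, [(0.5, 1.0), (0.3, 1.0)], 50]) broadcasts the
# scalar entries to the longest length, returning
# [[0.1, 0.1], [(0.5, 1.0), (0.3, 1.0)], [50, 50]].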
aligned_params = align_parameters([min_object_covered, aspect_ratio_range, area_range,
min_eject_coverage, max_attempts])
augs = []
for moc, arr, ar, mec, ma in zip(*aligned_params):
augs.append(DetRandomCropAug(min_object_covered=moc, aspect_ratio_range=arr,
area_range=ar, min_eject_coverage=mec, max_attempts=ma))
return DetRandomSelectAug(augs, skip_prob=skip_prob)
def CreateDetAugmenter(data_shape, resize=0, rand_crop=0, rand_pad=0, rand_gray=0,
rand_mirror=False, mean=None, std=None, brightness=0, contrast=0,
saturation=0, pca_noise=0, hue=0, inter_method=2, min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 3.0),
min_eject_coverage=0.3, max_attempts=50, pad_val=(127, 127, 127)):
"""Create augmenters for detection.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize shorter edge if larger than 0 at the beginning
rand_crop : float
[0, 1], probability to apply random cropping
rand_pad : float
[0, 1], probability to apply random padding
rand_gray : float
[0, 1], probability to convert to grayscale for all channels
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
Pca noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
min_object_covered : float
The cropped area of the image must contain at least this fraction of
any bounding box supplied. The value of this parameter should be non-negative.
In the case of 0, the cropped area does not need to overlap any of the
bounding boxes supplied.
min_eject_coverage : float
The minimum coverage of cropped sample w.r.t its original size. With this
constraint, objects that have marginal area after crop will be discarded.
aspect_ratio_range : tuple of floats
The cropped area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats
The cropped area of the image must contain a fraction of the supplied
image within this range.
max_attempts : int
Number of attempts at generating a cropped/padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
pad_val: float
Pixel value to be filled when padding is enabled. pad_val will automatically
be subtracted by mean and divided by std if applicable.
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateDetAugmenter(data_shape=(3, 300, 300), rand_crop=0.5,
... rand_pad=0.5, rand_mirror=True, mean=True, brightness=0.125, contrast=0.125,
... saturation=0.125, pca_noise=0.05, inter_method=10, min_object_covered=[0.3, 0.5, 0.9],
... area_range=(0.3, 3.0))
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
"""
auglist = []
if resize > 0:
auglist.append(DetBorrowAug(ResizeAug(resize, inter_method)))
if rand_crop > 0:
crop_augs = CreateMultiRandCropAugmenter(min_object_covered, aspect_ratio_range,
area_range, min_eject_coverage,
max_attempts, skip_prob=(1 - rand_crop))
auglist.append(crop_augs)
if rand_mirror > 0:
auglist.append(DetHorizontalFlipAug(0.5))
# apply random padding as late as possible to save computation
if rand_pad > 0:
pad_aug = DetRandomPadAug(aspect_ratio_range,
(1.0, area_range[1]), max_attempts, pad_val)
auglist.append(DetRandomSelectAug([pad_aug], 1 - rand_pad))
# force resize
auglist.append(DetBorrowAug(ForceResizeAug((data_shape[2], data_shape[1]), inter_method)))
auglist.append(DetBorrowAug(CastAug()))
if brightness or contrast or saturation:
auglist.append(DetBorrowAug(ColorJitterAug(brightness, contrast, saturation)))
if hue:
auglist.append(DetBorrowAug(HueJitterAug(hue)))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(DetBorrowAug(LightingAug(pca_noise, eigval, eigvec)))
if rand_gray > 0:
auglist.append(DetBorrowAug(RandomGrayAug(rand_gray)))
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(DetBorrowAug(ColorNormalizeAug(mean, std)))
return auglist
class ImageDetIter(ImageIter):
"""Image iterator with a large number of augmentation choices for detection.
Parameters
----------
aug_list : list or None
Augmenter list for generating distorted images
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB image with 3 channels is supported.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Name for detection labels
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateDetAugmenter.
"""
def __init__(self, batch_size, data_shape,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='label', **kwargs):
super(ImageDetIter, self).__init__(batch_size=batch_size, data_shape=data_shape,
path_imgrec=path_imgrec, path_imglist=path_imglist,
path_root=path_root, path_imgidx=path_imgidx,
shuffle=shuffle, part_index=part_index,
num_parts=num_parts, aug_list=[], imglist=imglist,
data_name=data_name, label_name=label_name)
if aug_list is None:
self.auglist = CreateDetAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
# go through all labels to get the proper label shape
label_shape = self._estimate_label_shape()
self.provide_label = [(label_name, (self.batch_size, label_shape[0], label_shape[1]))]
self.label_shape = label_shape
def _check_valid_label(self, label):
"""Validate label and its shape."""
if len(label.shape) != 2 or label.shape[1] < 5:
msg = "Label with shape (1+, 5+) required, %s received." % str(label)
raise RuntimeError(msg)
valid_label = np.where(np.logical_and(label[:, 0] >= 0, label[:, 3] > label[:, 1],
label[:, 4] > label[:, 2]))[0]
if valid_label.size < 1:
raise RuntimeError('Invalid label occurs.')
def _estimate_label_shape(self):
"""Helper function to estimate label shape"""
max_count = 0
self.reset()
try:
while True:
label, _ = self.next_sample()
label = self._parse_label(label)
max_count = max(max_count, label.shape[0])
except StopIteration:
pass
self.reset()
return (max_count, label.shape[1])
def _parse_label(self, label):
"""Helper function to parse object detection label.
Format for raw label:
n \t k \t ... \t [id \t xmin \t ymin \t xmax \t ymax \t ...] \t [repeat]
where n is the width of header, 2 or larger
k is the width of each object annotation, can be arbitrary, at least 5
"""
if isinstance(label, nd.NDArray):
label = label.asnumpy()
raw = label.ravel()
if raw.size < 7:
raise RuntimeError("Label shape is invalid: " + str(raw.shape))
header_width = int(raw[0])
obj_width = int(raw[1])
if (raw.size - header_width) % obj_width != 0:
msg = "Label shape %s inconsistent with annotation width %d." \
%(str(raw.shape), obj_width)
raise RuntimeError(msg)
out = np.reshape(raw[header_width:], (-1, obj_width))
# remove bad ground-truths
valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
if valid.size < 1:
raise RuntimeError('Encountered a sample with no valid label.')
return out[valid, :]
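# Example: a raw label [4, 5, -1, -1, 0, 0.1, 0.2, 0.6, 0.7, 1, 0.3, 0.3, 0.9, 0.8]
# has header_width=4 and obj_width=5, so the tail reshapes into two objects
# [[0, 0.1, 0.2, 0.6, 0.7], [1, 0.3, 0.3, 0.9, 0.8]] (class id, then normalized
# xmin, ymin, xmax, ymax); rows with xmax <= xmin or ymax <= ymin are dropped.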
def reshape(self, data_shape=None, label_shape=None):
"""Reshape iterator for data_shape or label_shape.
Parameters
----------
data_shape : tuple or None
Reshape the data_shape to the new shape if not None
label_shape : tuple or None
Reshape label shape to new shape if not None
"""
if data_shape is not None:
self.check_data_shape(data_shape)
self.provide_data = [(self.provide_data[0][0], (self.batch_size,) + data_shape)]
if label_shape is not None:
self.check_label_shape(label_shape)
self.provide_label = [(self.provide_label[0][0], (self.batch_size,) + label_shape)]
def next(self):
"""Override the function for returning next batch."""
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.zeros((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
batch_label[:] = -1
i = 0
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image([data])
label = self._parse_label(label)
data, label = self.augmentation_transform(data, label)
self._check_valid_label(label)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
for datum in [data]:
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(datum)
num_object = label.shape[0]
batch_label[i][0:num_object] = nd.array(label)
if num_object < batch_label[i].shape[0]:
batch_label[i][num_object:] = -1
i += 1
except StopIteration:
if not i:
raise StopIteration
return io.DataBatch([batch_data], [batch_label], batch_size - i)
def augmentation_transform(self, data, label): # pylint: disable=arguments-differ
"""Override Transforms input data with specified augmentations."""
for aug in self.auglist:
data, label = aug(data, label)
return (data, label)
def check_label_shape(self, label_shape):
"""Checks if the new label shape is valid"""
if not len(label_shape) == 2:
raise ValueError('label_shape should have length 2')
if label_shape[0] < self.label_shape[0]:
msg = 'Attempts to reduce label count from %d to %d, not allowed.' \
% (self.label_shape[0], label_shape[0])
raise ValueError(msg)
if label_shape[1] != self.provide_label[0][1][2]:
msg = 'label_shape object width inconsistent: %d vs %d.' \
% (self.provide_label[0][1][2], label_shape[1])
raise ValueError(msg)
def draw_next(self, color=None, thickness=2, mean=None, std=None, clip=True,
waitKey=None, window_name='draw_next'):
"""Display next image with bounding boxes drawn.
Parameters
----------
color : tuple
Bounding box color in RGB, use None for random color
thickness : int
Bounding box border thickness
mean : True or numpy.ndarray
Compensate for the mean to have better visual effect
std : True or numpy.ndarray
Revert standard deviations
clip : bool
If true, clip to [0, 255] for better visual effect
waitKey : None or int
Hold the window for waitKey milliseconds if set, skip plotting if None
window_name : str
Plot window name if waitKey is set.
Returns
-------
numpy.ndarray
Examples
--------
>>> # use draw_next to get images with bounding boxes drawn
>>> iterator = mx.image.ImageDetIter(1, (3, 600, 600), path_imgrec='train.rec')
>>> for image in iterator.draw_next(waitKey=None):
... # display image
>>> # or let draw_next display using cv2 module
>>> for image in iterator.draw_next(waitKey=0, window_name='disp'):
... pass
"""
try:
import cv2
except ImportError as e:
logging.warn('Unable to import cv2, skip drawing: %s', str(e))
raise StopIteration
count = 0
try:
while True:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image([data])
label = self._parse_label(label)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
count += 1
data, label = self.augmentation_transform(data, label)
image = data.asnumpy()
# revert color_normalize
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if std is not None:
image *= std
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if mean is not None:
image += mean
# swap RGB
image[:, :, (0, 1, 2)] = image[:, :, (2, 1, 0)]
if clip:
image = np.maximum(0, np.minimum(255, image))
if color:
color = color[::-1]
image = image.astype(np.uint8)
height, width, _ = image.shape
for i in range(label.shape[0]):
x1 = int(label[i, 1] * width)
if x1 < 0:
continue
y1 = int(label[i, 2] * height)
x2 = int(label[i, 3] * width)
y2 = int(label[i, 4] * height)
bc = np.random.rand(3) * 255 if not color else color
cv2.rectangle(image, (x1, y1), (x2, y2), bc, thickness)
if waitKey is not None:
cv2.imshow(window_name, image)
cv2.waitKey(waitKey)
yield image
except StopIteration:
if not count:
raise StopIteration
def sync_label_shape(self, it, verbose=False):
"""Synchronize label shape with the input iterator. This is useful when
train/validation iterators have different label padding.
Parameters
----------
it : ImageDetIter
The other iterator to synchronize
verbose : bool
Print verbose log if true
Returns
-------
ImageDetIter
The synchronized other iterator, the internal label shape is updated as well.
Examples
--------
>>> train_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='train.rec')
>>> val_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='val.rec')
>>> train_iter.label_shape
(30, 6)
>>> val_iter.label_shape
(25, 6)
>>> val_iter = train_iter.sync_label_shape(val_iter, verbose=False)
>>> train_iter.label_shape
(30, 6)
>>> val_iter.label_shape
(30, 6)
"""
assert isinstance(it, ImageDetIter), 'Synchronize with invalid iterator.'
train_label_shape = self.label_shape
val_label_shape = it.label_shape
assert train_label_shape[1] == val_label_shape[1], "object width mismatch."
max_count = max(train_label_shape[0], val_label_shape[0])
if max_count > train_label_shape[0]:
self.reshape(None, (max_count, train_label_shape[1]))
if max_count > val_label_shape[0]:
it.reshape(None, (max_count, val_label_shape[1]))
if verbose and max_count > min(train_label_shape[0], val_label_shape[0]):
logging.info('Resized label_shape to (%d, %d).', max_count, train_label_shape[1])
return it
|
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
x0=2
x1=1073741824
x2=37
x3=37
x4=37
x5=5
x6=37
def _0():
global t0
t0=0
sa(2)
sa(5)
return 1
def _1():
global x0
sa(x0-1)
sa(x0-1)
return 2
def _2():
return (4)if(sp()!=0)else(3)
def _3():
global x0
global t0
global x5
sp();
sp();
sa(sp()+1)
sa(sr());
x0=sr()
t0=(sr()*3)-1
sa(sp()*t0)
sa(sp()/2);
x5=sr()
return 1
def _4():
global x2
global t0
global t1
global x3
global x6
global x4
global x1
x2=sr()
t0=(sr()*3)-1
sa(sp()*t0)
t1=sp()
t1=t1/2
x3=t1
x6=0
sa(sp()-t1)
sa(sp()*24)
sa(sp()+1)
x4=sr()
sa(x1)
sa((1)if(x1>x4)else(0))
return 5
def _5():
return (25)if(sp()!=0)else(6)
def _6():
sa(sr());
return 7
def _7():
return (22)if(sp()!=0)else(8)
def _8():
global x6
global t0
sp();
sa(sp()-(x6*x6))
t0=x6
return (17)if(sp()!=0)else(9)
def _9():
global t0
t0=t0%6
t0=t0-5
return (17)if((t0)!=0)else(10)
def _10():
global x3
global x5
global t0
global x6
global x4
global x1
sa(((x3+x5)*24)+1)
t0=((x3+x5)*24)+1
x6=0
x4=t0
sa(x1)
sa((1)if(x1>x4)else(0))
return 11
def _11():
return (21)if(sp()!=0)else(12)
def _12():
sa(sr());
return 13
def _13():
return (18)if(sp()!=0)else(14)
def _14():
global x6
global t0
sp();
sa(sp()-(x6*x6))
t0=x6
return (17)if(sp()!=0)else(15)
def _15():
global t0
t0=t0%6
t0=t0-5
return (17)if((t0)!=0)else(16)
def _16():
global x5
global x3
sys.stdout.write(str(x5-x3)+" ")
sys.stdout.flush()
sp();
return 26
def _17():
global x5
global x2
sa(x5)
sa(x2-1)
sa(x2-1)
return 2
def _18():
global x6
global x4
return (19)if((sr()+x6)>x4)else(20)
def _19():
global x6
x6=x6/2
sa(sp()/4);
sa(sr());
return 13
def _20():
global t0
global x6
global t1
global x4
global t2
t0=sr()+x6
t1=x4
t2=t1-t0
x4=t2
t0=(sr()*2)+x6
x6=t0
x6=x6/2
sa(sp()/4);
return 12
def _21():
global x4
sa(sp()/4);
sa((1)if(sr()>x4)else(0))
return 11
def _22():
global x6
global x4
return (23)if((sr()+x6)>x4)else(24)
def _23():
global x6
x6=x6/2
sa(sp()/4);
sa(sr());
return 7
def _24():
global t0
global x6
global t1
global x4
global t2
t0=sr()+x6
t1=x4
t2=t1-t0
x4=t2
t0=(sr()*2)+x6
x6=t0
x6=x6/2
sa(sp()/4);
return 6
def _25():
global x4
sa(sp()/4);
sa((1)if(sr()>x4)else(0))
return 5
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25]
c=0
while c<26:
c=m[c]()
|
|
#!/usr/bin/env python
# Copyright 2014 Stanford University and Los Alamos National Security, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###
### Type Checker
###
# Backport of singledispatch to Python 2.x.
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
# Work around for OrderedDict missing in Python 2.6.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from . import ast, types
from .clang import types as ctypes
def is_eq(t): return types.is_POD(t) or types.is_pointer(t)
def returns_same_type(*ts): return ts[0]
def returns_bool(*_ignored): return types.Bool()
unary_operator_table = {
'-': (types.is_numeric, returns_same_type),
'!': (types.is_bool, returns_bool),
'~': (types.is_integral, returns_same_type),
}
binary_operator_table = {
'*': (types.is_numeric, returns_same_type),
'/': (types.is_numeric, returns_same_type),
'%': (types.is_integral, returns_same_type),
'+': (types.is_numeric, returns_same_type),
'-': (types.is_numeric, returns_same_type),
'>>': (types.is_integral, returns_same_type),
'<<': (types.is_integral, returns_same_type),
'<': (types.is_numeric, returns_bool),
'<=': (types.is_numeric, returns_bool),
'>': (types.is_numeric, returns_bool),
'>=': (types.is_numeric, returns_bool),
'==': (is_eq, returns_bool),
'!=': (is_eq, returns_bool),
'&': (types.is_integral, returns_same_type),
'^': (types.is_integral, returns_same_type),
'|': (types.is_integral, returns_same_type),
'&&': (types.is_bool, returns_bool),
'||': (types.is_bool, returns_bool),
}
reduce_operator_table = {
'*': types.is_numeric,
'/': types.is_numeric,
'%': types.is_integral,
'+': types.is_numeric,
'-': types.is_numeric,
'>>': types.is_integral,
'<<': types.is_integral,
'&': types.is_integral,
'^': types.is_integral,
'|': types.is_integral,
}
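# Each table entry pairs an operand predicate with a result-type rule. For
# example, '+' requires types.is_numeric operands and yields the operand type
# (returns_same_type), '<' also requires numeric operands but always yields
# types.Bool() (returns_bool), and '==' / '!=' additionally accept POD and
# pointer operands via is_eq. The reduce table only constrains operand types.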
# An "around" method-combination wrapper, a la Common Lisp.
class DispatchAround:
def __init__(self, inner_fn, outer_fn):
self.inner_fn = inner_fn
self.outer_fn = outer_fn
def __call__(self, *args, **kwargs):
return self.outer_fn(self.inner_fn, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.inner_fn, name)
def store_result_in_type_map(fn):
def helper(fn, node, cx):
node_type = fn(node, cx)
cx.type_map[node] = node_type
return node_type
return DispatchAround(fn, helper)
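# The decorator stack below therefore works in two layers: singledispatch picks
# the checking rule from the AST node's class, and store_result_in_type_map wraps
# every call so the computed type is recorded in cx.type_map before being
# returned. DispatchAround forwards attribute access (e.g. .register) to the
# wrapped singledispatch function, which is why @type_check_node.register(...)
# still works on the combined object.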
@store_result_in_type_map
@singledispatch
def type_check_node(node, cx):
raise Exception('Type checking failed at %s' % node)
@type_check_node.register(ast.Program)
def _(node, cx):
cx = cx.new_global_scope()
def_types = type_check_node(node.definitions, cx)
return types.Program(def_types)
@type_check_node.register(ast.Definitions)
def _(node, cx):
def_types = []
for definition in node.definitions:
def_types.append(type_check_node(definition, cx))
return def_types
@type_check_node.register(ast.Import)
def _(node, cx):
module_type = ctypes.foreign_type(node.ast, cx.opts)
for foreign_name, foreign_type in module_type.def_types.iteritems():
cx.insert(node, foreign_name, foreign_type)
cx.foreign_types.append(foreign_type)
return module_type
@type_check_node.register(ast.Struct)
def _(node, cx):
original_cx = cx
cx = cx.new_struct_scope()
# Initially create empty struct type.
struct_name = type_check_node(node.name, cx)
param_types = [
cx.region_forest.add(
types.Region(param.name, types.RegionKind(None, None)))
for param in node.params.params]
region_types = [
cx.region_forest.add(
types.Region(region.name, types.RegionKind(None, None)))
for region in node.regions.regions]
struct_constraints = []
empty_field_map = OrderedDict()
struct_type = types.Struct(struct_name, param_types, region_types, struct_constraints, empty_field_map)
def_struct_type = types.Kind(type = struct_type)
# Insert the struct name into global scope.
original_cx.insert(node, struct_name, def_struct_type)
# Figure out the actual types for params and regions and
# insert them into struct scope.
for param, param_type in zip(node.params.params, param_types):
cx.insert(node, param.name, param_type)
param_type.kind = type_check_node(param.type, cx)
if not param_type.validate_regions():
raise types.TypeError(node, 'Region type is inconsistent with itself: %s' % param_type.pretty_kind())
for region, region_type in zip(node.regions.regions, region_types):
cx.insert(node, region.name, region_type)
region_type.kind = type_check_node(region.type, cx)
if not region_type.validate_regions():
raise types.TypeError(node, 'Region type is inconsistent with itself: %s' % region_type.pretty_kind())
struct_constraints = type_check_node(node.constraints, cx)
struct_type.constraints = struct_constraints
field_map = type_check_node(node.field_decls, cx)
struct_type.field_map = field_map
# Note: This simple check only works as long as mutual
# recursion is disallowed on structs.
for field_type in field_map.itervalues():
if field_type == struct_type:
raise types.TypeError(node, 'Struct may not contain itself')
return def_struct_type
@type_check_node.register(ast.StructName)
def _(node, cx):
return node.name
@type_check_node.register(ast.StructConstraints)
def _(node, cx):
return [type_check_node(constraint, cx) for constraint in node.constraints]
@type_check_node.register(ast.StructConstraint)
def _(node, cx):
lhs = type_check_node(node.lhs, cx)
rhs = type_check_node(node.rhs, cx)
if lhs.kind.contains_type != rhs.kind.contains_type:
raise types.TypeError(node, 'Type mismatch in region element types for constraint: %s and %s' % (
lhs.kind.contains_type, rhs.kind.contains_type))
constraint = types.Constraint(node.op, lhs, rhs)
return constraint
@type_check_node.register(ast.StructConstraintRegion)
def _(node, cx):
region_type = cx.lookup(node, node.name)
assert types.is_region(region_type)
return region_type
@type_check_node.register(ast.FieldDecls)
def _(node, cx):
return OrderedDict([
type_check_node(field_decl, cx)
for field_decl in node.field_decls])
@type_check_node.register(ast.FieldDecl)
def _(node, cx):
field_kind = type_check_node(node.field_type, cx)
return (node.name, field_kind.type)
@type_check_node.register(ast.Function)
def _(node, cx):
original_cx = cx
cx = cx.new_function_scope()
fn_name = type_check_node(node.name, cx)
param_types = type_check_node(node.params, cx)
cx.privileges = type_check_node(node.privileges, cx)
return_kind = type_check_node(node.return_type, cx)
assert types.is_kind(return_kind)
return_type = return_kind.type
fn_type = types.Function(param_types, cx.privileges, return_type)
# Insert function name into global scope. Second insert
# prevents parameters from shadowing function name.
original_cx.insert(node, fn_name, fn_type)
cx.insert(node, fn_name, fn_type)
type_check_node(node.block, cx.with_return_type(return_type))
return fn_type
@type_check_node.register(ast.FunctionName)
def _(node, cx):
return node.name
@type_check_node.register(ast.FunctionParams)
def _(node, cx):
return [type_check_node(param, cx)
for param in node.params]
@type_check_node.register(ast.FunctionParam)
def _(node, cx):
if isinstance(node.declared_type, ast.TypeRegionKind):
# Region types may be self-referential. Insert regions
# into scope early to handle recursive types.
region_type = types.Region(node.name, types.RegionKind(None, None))
cx.region_forest.add(region_type)
cx.insert(node, node.name, region_type)
region_kind = type_check_node(node.declared_type, cx)
region_type.kind = region_kind
if not region_type.validate_regions():
raise types.TypeError(node, 'Region type is inconsistent with itself: %s' % region_type.pretty_kind())
return region_type
if isinstance(node.declared_type, ast.TypeArrayKind):
# Region types may be self-referential. Insert regions
# into scope early to handle recursive types.
region_type = types.Region(node.name, types.RegionKind(None, None))
cx.region_forest.add(region_type)
cx.insert(node, node.name, region_type)
region_kind = type_check_node(node.declared_type, cx)
region_type.kind = region_kind
return region_type
if isinstance(node.declared_type, ast.TypeIspaceKind):
ispace_kind = type_check_node(node.declared_type, cx)
ispace_type = types.Ispace(node.name, ispace_kind)
cx.insert(node, node.name, ispace_type)
return ispace_type
# Handle non-region types:
declared_kind = type_check_node(node.declared_type, cx)
assert types.is_kind(declared_kind)
declared_type = declared_kind.type
if types.is_void(declared_type):
raise types.TypeError(node, 'Task parameters are not allowed to be void')
if not types.is_concrete(declared_type):
raise types.TypeError(node, 'Task parameters are not allowed to contain wildcards')
assert types.allows_var_binding(declared_type)
reference_type = types.StackReference(declared_type)
cx.insert(node, node.name, reference_type)
return declared_type
@type_check_node.register(ast.FunctionReturnType)
def _(node, cx):
return type_check_node(node.declared_type, cx)
@type_check_node.register(ast.FunctionPrivileges)
def _(node, cx):
return cx.privileges | set(
privilege
for privilege_node in node.privileges
for privilege in type_check_node(privilege_node, cx))
@type_check_node.register(ast.FunctionPrivilege)
def _(node, cx):
return type_check_node(node.privilege, cx)
@type_check_node.register(ast.TypeVoid)
def _(node, cx):
return types.Kind(types.Void())
@type_check_node.register(ast.TypeBool)
def _(node, cx):
return types.Kind(types.Bool())
@type_check_node.register(ast.TypeDouble)
def _(node, cx):
return types.Kind(types.Double())
@type_check_node.register(ast.TypeFloat)
def _(node, cx):
return types.Kind(types.Float())
@type_check_node.register(ast.TypeInt)
def _(node, cx):
return types.Kind(types.Int())
@type_check_node.register(ast.TypeUInt)
def _(node, cx):
return types.Kind(types.UInt())
@type_check_node.register(ast.TypeInt8)
def _(node, cx):
return types.Kind(types.Int8())
@type_check_node.register(ast.TypeInt16)
def _(node, cx):
return types.Kind(types.Int16())
@type_check_node.register(ast.TypeInt32)
def _(node, cx):
return types.Kind(types.Int32())
@type_check_node.register(ast.TypeInt64)
def _(node, cx):
return types.Kind(types.Int64())
@type_check_node.register(ast.TypeUInt8)
def _(node, cx):
return types.Kind(types.UInt8())
@type_check_node.register(ast.TypeUInt16)
def _(node, cx):
return types.Kind(types.UInt16())
@type_check_node.register(ast.TypeUInt32)
def _(node, cx):
return types.Kind(types.UInt32())
@type_check_node.register(ast.TypeUInt64)
def _(node, cx):
return types.Kind(types.UInt64())
@type_check_node.register(ast.TypeColoring)
def _(node, cx):
region = type_check_node(node.region, cx)
if not (types.is_region(region) or types.is_ispace(region)):
raise types.TypeError(node, 'Type mismatch in type %s: expected %s but got %s' % (
'coloring', 'a region or ispace', region))
return types.Kind(types.Coloring(region))
@type_check_node.register(ast.TypeColoringRegion)
def _(node, cx):
return cx.lookup(node, node.name)
@type_check_node.register(ast.TypeID)
def _(node, cx):
kind = cx.lookup(node, node.name)
args = type_check_node(node.args, cx)
if not types.is_kind(kind):
raise types.TypeError(node, 'Type mismatch in type %s: expected a type but got %s' % (
node.name, kind))
if len(args) != len(kind.type.params):
raise types.TypeError(node, 'Incorrect number of arguments for struct %s: expected %s but got %s' % (
node.name, len(kind.type.params), len(args)))
region_map = dict([
(old_region, new_region)
for old_region, new_region in zip(kind.type.params, args)])
for param, arg in zip(kind.type.params, args):
assert types.is_region(param)
if types.is_region(arg):
if param.kind.contains_type is not None and arg.kind.contains_type is not None:
param_kind = param.kind.substitute_regions(region_map)
arg_kind = arg.kind
if param_kind != arg_kind:
raise types.TypeError(node, 'Type mismatch in type parameter to %s: expected %s but got %s' % (
node.name, param_kind, arg_kind))
elif types.is_region_wild(arg):
pass
else:
assert False
return kind.instantiate_params(region_map)
@type_check_node.register(ast.TypeArgs)
def _(node, cx):
return [type_check_node(arg, cx) for arg in node.args]
@type_check_node.register(ast.TypeArg)
def _(node, cx):
arg = cx.lookup(node, node.name)
if not types.is_region(arg):
raise types.TypeError(node, 'Type mismatch in type %s: expected a region but got %s' % (
node.name, arg))
return arg
@type_check_node.register(ast.TypeArgWild)
def _(node, cx):
return types.RegionWild()
@type_check_node.register(ast.TypePointer)
def _(node, cx):
points_to_kind = type_check_node(node.points_to_type, cx)
regions = type_check_node(node.regions, cx)
assert types.is_kind(points_to_kind)
points_to_type = points_to_kind.type
for region in regions:
if types.is_region(region):
contains_type = region.kind.contains_type
if contains_type is not None and contains_type != points_to_type:
raise types.TypeError(node, 'Type mismatch in pointer type: expected %s but got %s' % (
contains_type, points_to_type))
elif types.is_region_wild(region):
pass
else:
if not types.is_kind(region):
raise types.TypeError(node, 'Type mismatch in pointer type: expected a region but got %s' % (
region))
raise types.TypeError(node, 'Type mismatch in pointer type: expected a region but got %s' % (
region.type))
return types.Kind(types.Pointer(points_to_type, regions))
@type_check_node.register(ast.TypePointerRegions)
def _(node, cx):
return [type_check_node(region, cx)
for region in node.regions]
@type_check_node.register(ast.TypeRegion)
def _(node, cx):
region_type = cx.lookup(node, node.name)
return region_type
@type_check_node.register(ast.TypeRegionWild)
def _(node, cx):
return types.RegionWild()
@type_check_node.register(ast.TypeRegionKind)
def _(node, cx):
contains_type = None
if node.contains_type is not None:
contains_type = type_check_node(node.contains_type, cx).type
return types.RegionKind(None, contains_type)
@type_check_node.register(ast.TypeArrayKind)
def _(node, cx):
ispace = type_check_node(node.ispace, cx)
contains_type = type_check_node(node.contains_type, cx).type
return types.RegionKind(ispace, contains_type)
@type_check_node.register(ast.TypeIspace)
def _(node, cx):
ispace_type = cx.lookup(node, node.name)
return ispace_type
@type_check_node.register(ast.TypeIspaceKind)
def _(node, cx):
index_type = type_check_node(node.index_type, cx).type
return types.IspaceKind(index_type)
@type_check_node.register(ast.Privilege)
def _(node, cx):
if node.privilege == 'reads':
privilege = types.Privilege.READ
elif node.privilege == 'writes':
privilege = types.Privilege.WRITE
elif node.privilege == 'reduces':
privilege = types.Privilege.REDUCE
else:
assert False
regions = type_check_node(node.regions, cx)
return [
types.Privilege(node, privilege, node.op, region, field_path)
for region, field_path in regions]
@type_check_node.register(ast.PrivilegeRegions)
def _(node, cx):
return [
region
for region_node in node.regions
for region in type_check_node(region_node, cx)]
@type_check_node.register(ast.PrivilegeRegion)
def _(node, cx):
region = cx.lookup(node, node.name)
field_paths = type_check_node(node.fields, cx)
return [(region, field_path) for field_path in field_paths]
@type_check_node.register(ast.PrivilegeRegionFields)
def _(node, cx):
if len(node.fields) == 0:
return [()]
return [
field_path
for field_node in node.fields
for field_path in type_check_node(field_node, cx)]
@type_check_node.register(ast.PrivilegeRegionField)
def _(node, cx):
prefix = (node.name,)
field_paths = type_check_node(node.fields, cx)
return [prefix + field_path for field_path in field_paths]
@type_check_node.register(ast.Block)
def _(node, cx):
cx = cx.new_block_scope()
for expr in node.block:
type_check_node(expr, cx)
return types.Void()
@type_check_node.register(ast.StatementAssert)
def _(node, cx):
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
if not types.is_bool(expr_type):
raise types.TypeError(node, 'Type mismatch in assert statement: expected %s but got %s' % (
types.Bool(), expr_type))
return types.Void()
@type_check_node.register(ast.StatementExpr)
def _(node, cx):
type_check_node(node.expr, cx).check_read(node.expr, cx)
return types.Void()
@type_check_node.register(ast.StatementIf)
def _(node, cx):
condition_type = type_check_node(node.condition, cx).check_read(node.condition, cx)
type_check_node(node.then_block, cx)
if node.else_block is not None:
type_check_node(node.else_block, cx)
if not types.is_bool(condition_type):
raise types.TypeError(node, 'If condition expression is not type bool')
return types.Void()
@type_check_node.register(ast.StatementFor)
def _(node, cx):
cx = cx.new_block_scope()
index_types = type_check_node(node.indices, cx)
region_types = type_check_node(node.regions, cx)
if len(index_types) != len(region_types):
raise types.TypeError(node, 'Incorrect number of indices in for statement: expected %s but got %s' % (
len(region_types), len(index_types)))
# Two forms of iteration are supported, over a single index
# space, or over any number of regions. In the case where
# multiple regions are being iterated, it is assumed the
# regions have the same index space. At the moment this has to
# be checked dynamically to be sound.
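# Illustration (hypothetical surface syntax, not taken from this file):
#   for i in some_ispace { ... }      -- one index ranging over an index space
#   for p, q in r1, r2 { ... }        -- pointer indices ranging over regions,
#                                        assumed to share an index space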
if len(region_types) == 1 and types.is_ispace(region_types[0]):
index_node = node.indices.indices[0]
index_type = index_types[0]
ispace_type = region_types[0]
# We can infer the index type if unspecified.
if index_type is None:
index_type = ispace_type.kind.index_type
if index_type != ispace_type.kind.index_type:
raise types.TypeError(node, 'Type mismatch in for statement: expected %s but got %s' % (
index_type, ispace_type.kind.index_type))
# Patch environment and type map to know about the inferred index type.
cx.insert(node, index_node.name, index_type)
cx.type_map[index_node] = index_type
else:
for index_node, index_type, region_type, index \
in zip(node.indices.indices, index_types, region_types, xrange(len(index_types))):
if not types.is_region(region_type):
raise types.TypeError(node, 'Type mismatch on index %s of for statement: expected a region but got %s' % (
index, region_type))
# We can infer the index type as long as the region is explicitly typed.
if index_type is None:
if region_type.kind.contains_type is None:
raise types.TypeError(node, 'Unable to infer type of index %s of for statement: region %s has no element type' % (
index, region_type))
index_type = types.Pointer(region_type.kind.contains_type, [region_type])
if not types.is_pointer(index_type):
raise types.TypeError(node, 'Type mismatch on index %s of for statement: expected a pointer but got %s' % (
index, index_type))
if len(index_type.regions) != 1 or index_type.regions[0] != region_type:
raise types.TypeError(node, 'Type mismatch on index %s of for statement: expected %s but got %s' % (
index, index_type,
types.Pointer(region_type.kind.contains_type, [region_type])))
# Patch environment and type map to know about the inferred index type.
cx.insert(node, index_node.name, index_type)
cx.type_map[index_node] = index_type
type_check_node(node.block, cx)
return types.Void()
@type_check_node.register(ast.ForIndices)
def _(node, cx):
return [type_check_node(index, cx)
for index in node.indices]
@type_check_node.register(ast.ForIndex)
def _(node, cx):
if node.type is not None:
declared_kind = type_check_node(node.type, cx)
assert types.is_kind(declared_kind)
return declared_kind.type
return None
@type_check_node.register(ast.ForRegions)
def _(node, cx):
return [type_check_node(region, cx)
for region in node.regions]
@type_check_node.register(ast.ForRegion)
def _(node, cx):
region_type = cx.lookup(node, node.name)
return region_type
@type_check_node.register(ast.StatementLet)
def _(node, cx):
declared_type = None
if node.type is not None:
declared_kind = type_check_node(node.type, cx)
if types.is_region_kind(declared_kind):
declared_type = types.Region(node.name, declared_kind)
cx.region_forest.add(declared_type)
if types.is_kind(declared_kind):
declared_type = declared_kind.type
else:
assert False
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
# Hack: Rather than full type inference, which gets ugly fast, just
# implement "auto-style" inference by using the expression
# type if no type declaration is provided.
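# Illustration (hypothetical fragment): `let x = 1 + 2` gives x the type of
# its initializer; a region-valued initializer instead produces a fresh named
# region of the same kind, as handled below.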
if declared_type is None:
if types.is_region(expr_type):
declared_type = types.Region(node.name, expr_type.kind)
cx.region_forest.add(declared_type)
else:
declared_type = expr_type
if not types.is_concrete(declared_type):
raise types.TypeError(node, 'Let bound expressions are not allowed to contain wildcards')
if types.is_void(declared_type):
raise types.TypeError(node, 'Let bound expressions are not allowed to be void')
if types.is_region(expr_type) and types.is_region(declared_type):
if expr_type.kind != declared_type.kind:
raise types.TypeError(node, 'Let bound expression of type %s does not match declared type %s' % (
expr_type.kind, declared_type.kind))
else:
if expr_type != declared_type:
raise types.TypeError(node, 'Let bound expression of type %s does not match declared type %s' % (
expr_type, declared_type))
cx.insert(node, node.name, declared_type, shadow = True)
if types.is_region(expr_type):
cx.region_forest.union(declared_type, expr_type)
cx.constraints.add(
types.Constraint(lhs = expr_type, op = types.Constraint.SUBREGION, rhs = declared_type))
cx.constraints.add(
types.Constraint(lhs = declared_type, op = types.Constraint.SUBREGION, rhs = expr_type))
return declared_type
@type_check_node.register(ast.StatementLetRegion)
def _(node, cx):
region_type = types.Region(node.name, types.RegionKind(None, None))
cx.region_forest.add(region_type)
# Insert region name into scope so that element type can refer to it.
cx.insert(node, node.name, region_type)
declared_region_kind = None
if node.region_kind is not None:
declared_region_kind = type_check_node(node.region_kind, cx)
element_kind = type_check_node(node.element_type, cx)
size_type = type_check_node(node.size_expr, cx).check_read(node.size_expr, cx)
assert types.is_kind(element_kind) and not types.is_void(element_kind.type)
if not types.is_int(size_type):
raise types.TypeError(node, 'Type mismatch in region: expected %s but got %s' % (
types.Int(), size_type))
# Now patch region type so that it refers to the contained type.
region_kind = types.RegionKind(None, element_kind.type)
region_type.kind = region_kind
if not region_type.validate_regions():
raise types.TypeError(node, 'Region type is inconsistent with itself: %s' % region_type.pretty_kind())
if declared_region_kind is None:
declared_region_kind = region_kind
if declared_region_kind != region_kind:
raise types.TypeError(node, 'Let bound expression of type %s does not match declared type %s' % (
region_kind, declared_region_kind))
cx.privileges.add(types.Privilege(node, types.Privilege.READ, None, region_type, ()))
cx.privileges.add(types.Privilege(node, types.Privilege.WRITE, None, region_type, ()))
return region_type
@type_check_node.register(ast.StatementLetArray)
def _(node, cx):
ispace_type = type_check_node(node.ispace_type, cx)
region_type = types.Region(node.name, types.RegionKind(ispace_type, None))
cx.region_forest.add(region_type)
# Insert region name into scope so that element type can refer to it.
cx.insert(node, node.name, region_type)
declared_region_kind = None
if node.region_kind is not None:
declared_region_kind = type_check_node(node.region_kind, cx)
element_kind = type_check_node(node.element_type, cx)
assert types.is_kind(element_kind) and not types.is_void(element_kind.type)
# Now patch region type so that it refers to the contained type.
region_kind = types.RegionKind(ispace_type, element_kind.type)
region_type.kind = region_kind
if declared_region_kind is None:
declared_region_kind = region_kind
if declared_region_kind != region_kind:
raise types.TypeError(node, 'Let bound expression of type %s does not match declared type %s' % (
region_kind, declared_region_kind))
cx.privileges.add(types.Privilege(node, types.Privilege.READ, None, region_type, ()))
cx.privileges.add(types.Privilege(node, types.Privilege.WRITE, None, region_type, ()))
return region_type
@type_check_node.register(ast.StatementLetIspace)
def _(node, cx):
declared_ispace_kind = None
if node.ispace_kind is not None:
declared_ispace_kind = type_check_node(node.ispace_kind, cx)
index_kind = type_check_node(node.index_type, cx)
size_type = type_check_node(node.size_expr, cx).check_read(node.size_expr, cx)
assert types.is_kind(index_kind) and types.is_int(index_kind.type)
if not types.is_int(size_type):
raise types.TypeError(node, 'Type mismatch in ispace: expected %s but got %s' % (
types.Int(), size_type))
ispace_kind = types.IspaceKind(index_kind.type)
if declared_ispace_kind is None:
declared_ispace_kind = ispace_kind
if declared_ispace_kind != ispace_kind:
raise types.TypeError(node, 'Let bound expression of type %s does not match declared type %s' % (
ispace_kind, declared_ispace_kind))
ispace_type = types.Ispace(node.name, ispace_kind)
cx.insert(node, node.name, ispace_type)
return types.Void()
@type_check_node.register(ast.StatementLetPartition)
def _(node, cx):
region_type = type_check_node(node.region_type, cx).check_read(node.region_type, cx)
mode = type_check_node(node.mode, cx)
coloring_type = type_check_node(node.coloring_expr, cx).check_read(node.coloring_expr, cx)
if not (types.is_region(region_type) or types.is_ispace(region_type)):
raise types.TypeError(node, 'Type mismatch in partition: expected a region or ispace but got %s' % (
region_type))
expected_coloring_type = types.Coloring(region_type)
if coloring_type != expected_coloring_type:
raise types.TypeError(node, 'Type mismatch in partition: expected %s but got %s' % (
expected_coloring_type, coloring_type))
partition_kind = types.PartitionKind(region_type, mode)
partition_type = types.Partition(node.name, partition_kind)
cx.insert(node, node.name, partition_type)
return partition_type
@type_check_node.register(ast.PartitionMode)
def _(node, cx):
if node.mode == 'disjoint':
return types.Partition.DISJOINT
elif node.mode == 'aliased':
return types.Partition.ALIASED
assert False
@type_check_node.register(ast.StatementReturn)
def _(node, cx):
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
if expr_type != cx.return_type:
raise types.TypeError(node, 'Returned expression of type %s does not match declared return type %s' % (
expr_type, cx.return_type))
return types.Void()
@type_check_node.register(ast.StatementUnpack)
def _(node, cx):
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
declared_kind = type_check_node(node.type, cx)
assert types.is_kind(declared_kind)
declared_type = declared_kind.type
if not types.is_struct(expr_type):
raise types.TypeError(node, 'Type mismatch in unpack: expected %s but got %s' % (
'a struct', expr_type))
region_types = type_check_node(node.regions, cx)
for region, region_type in zip(node.regions.regions, region_types):
cx.insert(node, region.name, region_type) # FIXME: handle shadowing
region_map = dict(zip(declared_type.regions, region_types))
actual_type = declared_type.instantiate_regions(region_map)
# Patch regions so that they contain the correct type.
for region_type, declared_region_type in zip(region_types, declared_type.regions):
region_type.kind = declared_region_type.kind.substitute_regions(region_map)
if expr_type != declared_type:
raise types.TypeError(node, 'Type mismatch in unpack: expected %s but got %s' % (
declared_type, expr_type))
cx.insert(node, node.name, actual_type) # FIXME: handle shadowing
cx.constraints.update(actual_type.constraints)
return region_types
@type_check_node.register(ast.UnpackRegions)
def _(node, cx):
return [type_check_node(region, cx) for region in node.regions]
@type_check_node.register(ast.UnpackRegion)
def _(node, cx):
# Create regions with empty region_types initially, patch later.
region_type = types.Region(node.name, types.RegionKind(None, None))
cx.region_forest.add(region_type)
return region_type
@type_check_node.register(ast.StatementVar)
def _(node, cx):
declared_type = None
if node.type is not None:
declared_kind = type_check_node(node.type, cx)
assert types.is_kind(declared_kind)
declared_type = declared_kind.type
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
# Hack: Rather than full type inference, which gets ugly fast, just
# implement "auto-style" inference by using the expression
# type if no type declaration is provided.
if declared_type is None:
declared_type = expr_type
if not types.is_concrete(declared_type):
raise types.TypeError(node, 'Variables are not allowed to contain wildcards')
if expr_type != declared_type:
raise types.TypeError(node, 'Variable initializer of type %s does not match declared type %s' % (
expr_type, declared_type))
assert types.allows_var_binding(declared_type)
reference_type = types.StackReference(declared_type)
cx.insert(node, node.name, reference_type, shadow = True)
return types.Void()
@type_check_node.register(ast.StatementWhile)
def _(node, cx):
condition_type = type_check_node(node.condition, cx).check_read(node.condition, cx)
type_check_node(node.block, cx)
if not types.is_bool(condition_type):
raise types.TypeError(node, 'While condition expression is not type bool')
return types.Void()
@type_check_node.register(ast.ExprID)
def _(node, cx):
id_type = cx.lookup(node, node.name)
return id_type
@type_check_node.register(ast.ExprAssignment)
def _(node, cx):
lval_type = type_check_node(node.lval, cx).check_write(node.lval, cx)
rval_type = type_check_node(node.rval, cx).check_read(node.rval, cx)
if lval_type != rval_type:
raise types.TypeError(node, 'Type mismatch in assignment: %s and %s' % (
lval_type, rval_type))
return rval_type
@type_check_node.register(ast.ExprUnaryOp)
def _(node, cx):
arg_type = type_check_node(node.arg, cx).check_read(node.arg, cx)
if not unary_operator_table[node.op][0](arg_type):
raise types.TypeError(node, 'Type mismatch in operand to unary operator: %s' % (
arg_type))
return unary_operator_table[node.op][1](arg_type)
@type_check_node.register(ast.ExprBinaryOp)
def _(node, cx):
lhs_type = type_check_node(node.lhs, cx).check_read(node.lhs, cx)
rhs_type = type_check_node(node.rhs, cx).check_read(node.rhs, cx)
if lhs_type != rhs_type:
raise types.TypeError(node, 'Type mismatch in operands to binary operator: %s and %s' % (
lhs_type, rhs_type))
if not binary_operator_table[node.op][0](lhs_type):
raise types.TypeError(node, 'Type mismatch in operand to binary operator: %s' % (
lhs_type))
if not binary_operator_table[node.op][0](rhs_type):
raise types.TypeError(node, 'Type mismatch in operand to binary operator: %s' % (
rhs_type))
return binary_operator_table[node.op][1](lhs_type, rhs_type)
@type_check_node.register(ast.ExprReduceOp)
def _(node, cx):
lhs_type = type_check_node(node.lhs, cx).check_reduce(node.lhs, node.op, cx)
rhs_type = type_check_node(node.rhs, cx).check_read(node.rhs, cx)
if lhs_type != rhs_type:
raise types.TypeError(node, 'Type mismatch in operands to binary operator: %s and %s' % (
lhs_type, rhs_type))
if not reduce_operator_table[node.op](lhs_type):
raise types.TypeError(node, 'Type mismatch in operand to binary operator: %s' % (
lhs_type))
if not reduce_operator_table[node.op](rhs_type):
raise types.TypeError(node, 'Type mismatch in operand to binary operator: %s' % (
rhs_type))
return types.Void()
@type_check_node.register(ast.ExprCast)
def _(node, cx):
cast_to_kind = type_check_node(node.cast_to_type, cx)
assert types.is_kind(cast_to_kind) and types.is_numeric(cast_to_kind.type)
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
if not types.is_numeric(expr_type):
raise types.TypeError(node, 'Type mismatch in cast: expected a number but got %s' % (
expr_type))
return cast_to_kind.type
@type_check_node.register(ast.ExprNull)
def _(node, cx):
pointer_kind = type_check_node(node.pointer_type, cx)
assert types.is_kind(pointer_kind) and types.is_pointer(pointer_kind.type)
return pointer_kind.type
@type_check_node.register(ast.ExprIsnull)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
0, 'isnull', 'a pointer', pointer_type))
return types.Bool()
@type_check_node.register(ast.ExprNew)
def _(node, cx):
pointer_kind = type_check_node(node.pointer_type, cx)
assert types.is_kind(pointer_kind) and types.is_pointer(pointer_kind.type)
pointer_type = pointer_kind.type
if len(pointer_type.regions) != 1:
raise types.TypeError(node, 'Type mismatch in new: cannot allocate pointer with more than one region %s' % (
pointer_type))
region_type = pointer_type.regions[0]
if region_type.kind.ispace is not None:
raise types.TypeError(node, 'Type mismatch in new: cannot allocate into array %s' %
region_type)
return pointer_type
@type_check_node.register(ast.ExprRead)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch in read: expected a pointer but got %s' % (
pointer_type))
privileges_requested = [
types.Privilege(node, types.Privilege.READ, None, region, ())
for region in pointer_type.regions]
success, failed_request = types.check_privileges(privileges_requested, cx)
if not success:
raise types.TypeError(node, 'Invalid privilege %s requested in read' % failed_request)
value_type = pointer_type.points_to_type
return value_type
@type_check_node.register(ast.ExprWrite)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
value_type = type_check_node(node.value_expr, cx).check_read(node.value_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch in write: expected a pointer but got %s' % (
pointer_type))
if pointer_type.points_to_type != value_type:
raise types.TypeError(node, 'Type mismatch in write: expected %s but got %s' % (
value_type, pointer_type.points_to_type))
privileges_requested = [
types.Privilege(node, types.Privilege.WRITE, None, region, ())
for region in pointer_type.regions]
success, failed_request = types.check_privileges(privileges_requested, cx)
if not success:
raise types.TypeError(node, 'Invalid privilege %s requested in write' % failed_request)
return types.Void()
@type_check_node.register(ast.ExprReduce)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
value_type = type_check_node(node.value_expr, cx).check_read(node.value_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch in reduce: expected a pointer but got %s' % (
pointer_type))
if pointer_type.points_to_type != value_type:
raise types.TypeError(node, 'Type mismatch in reduce: %s and %s' % (
pointer_type.points_to_type, value_type))
if not reduce_operator_table[node.op](pointer_type.points_to_type):
raise types.TypeError(node, 'Type mismatch in reduce: %s' % (
pointer_type.points_to_type))
if not reduce_operator_table[node.op](value_type):
raise types.TypeError(node, 'Type mismatch in reduce: %s' % (
value_type))
privileges_requested = [
types.Privilege(node, types.Privilege.REDUCE, node.op, region, ())
for region in pointer_type.regions]
success, failed_request = types.check_privileges(privileges_requested, cx)
if not success:
raise types.TypeError(node, 'Invalid privilege %s requested in reduce' % failed_request)
return types.Void()
@type_check_node.register(ast.ExprDereference)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch in pointer dereference: expected a pointer but got %s' % (
pointer_type))
reference_type = types.Reference(
refers_to_type = pointer_type.points_to_type,
regions = pointer_type.regions)
return reference_type
@type_check_node.register(ast.ExprArrayAccess)
def _(node, cx):
array_type = type_check_node(node.array_expr, cx).check_read(node.array_expr, cx)
index_type = type_check_node(node.index_expr, cx).check_read(node.index_expr, cx)
# Handle partitions:
if types.is_partition(array_type):
if not types.is_int(index_type):
raise types.TypeError(node, 'Type mismatch in index for partition access: expected %s but got %s' % (
types.Int(),
index_type))
# Check whether the index expression is a compile-time
# constant value. Add disjointness constraints for the
# subregion if and only if the index is constant.
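# Illustration (hypothetical surface syntax): `p[0]` yields a statically named
# subregion eligible for disjointness constraints, while `p[i]` with a runtime
# index falls through to dynamic_subregion below.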
if isinstance(node.index_expr, ast.ExprConstInt):
index = node.index_expr.value
subregion_type = array_type.static_subregion(index, cx)
else:
index_expr = node.index_expr
subregion_type = array_type.dynamic_subregion(index_expr, cx)
return subregion_type
# Handle array slicing:
if types.is_region(array_type) and types.is_ispace(index_type):
if array_type.kind.ispace is None:
raise types.TypeError(node, 'Type mismatch in array slice: expected an array but got %s' % (
array_type))
# Check constraints for the index space to make sure it is
# a subset of the index space of the array.
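# Illustration: slicing array region r by index space i only type checks when
# the constraint i <= r.kind.ispace is already provable from cx.constraints.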
success, failed_request = types.check_constraints(
[types.Constraint(lhs = index_type, op = types.Constraint.SUBREGION, rhs = array_type.kind.ispace)],
cx.constraints)
if not success:
raise types.TypeError(node, 'Invalid constraint %s requested in array slice' % (
'%s <= %s' % (index_type, array_type.kind.ispace)))
array_kind = types.RegionKind(index_type, array_type.kind.contains_type)
subarray_type = types.Region('%s[%s]' % (array_type, index_type), array_kind)
cx.region_forest.union(subarray_type, array_type)
return subarray_type
# Handle arrays:
if not types.is_region(array_type):
raise types.TypeError(node, 'Type mismatch in array access: expected an array but got %s' % (
array_type))
ispace = array_type.kind.ispace
if ispace is None:
raise types.TypeError(node, 'Type mismatch in array access: expected an array but got %s' % (
array_type.kind))
if ispace.kind.index_type != index_type:
raise types.TypeError(node, 'Type mismatch in index for array access: expected %s but got %s' % (
ispace.kind.index_type,
index_type))
reference_type = types.Reference(
refers_to_type = array_type.kind.contains_type,
regions = [array_type])
return reference_type
@type_check_node.register(ast.ExprFieldAccess)
def _(node, cx):
wrapper_type = type_check_node(node.struct_expr, cx)
struct_type = wrapper_type.as_read()
if not types.is_struct(struct_type):
raise types.TypeError(node, 'Type mismatch in struct field access: expected a struct but got %s' % (
struct_type))
if node.field_name not in struct_type.field_map:
raise types.TypeError(node, 'Struct %s has no field named %s' % (
struct_type, node.field_name))
return wrapper_type.get_field(node.field_name)
@type_check_node.register(ast.ExprFieldDereference)
def _(node, cx):
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
if not types.is_pointer(pointer_type):
raise types.TypeError(node, 'Type mismatch in struct field dereference: expected a pointer to a struct but got %s' % (
pointer_type))
if not types.is_struct(pointer_type.points_to_type):
raise types.TypeError(node, 'Type mismatch in struct field dereference: expected a pointer to a struct but got %s' % (
pointer_type))
if node.field_name not in pointer_type.points_to_type.field_map:
raise types.TypeError(node, 'Struct %s has no field named %s' % (
pointer_type.points_to_type, node.field_name))
return types.Reference(pointer_type.points_to_type, pointer_type.regions).get_field(node.field_name)
@type_check_node.register(ast.ExprFieldValues)
def _(node, cx):
field_values = type_check_node(node.field_values, cx)
field_map = OrderedDict()
for field_name, value_type in field_values:
field_map[field_name] = value_type
struct_type = types.Struct(None, [], [], set(), field_map)
return struct_type
@type_check_node.register(ast.FieldValues)
def _(node, cx):
return [type_check_node(field_value, cx)
for field_value in node.field_values]
@type_check_node.register(ast.FieldValue)
def _(node, cx):
return (
node.field_name,
type_check_node(node.value_expr, cx).check_read(node.value_expr, cx))
@type_check_node.register(ast.ExprFieldUpdates)
def _(node, cx):
struct_type = type_check_node(node.struct_expr, cx).check_read(node.struct_expr, cx)
field_updates = type_check_node(node.field_updates, cx)
if not types.is_struct(struct_type):
raise types.TypeError(node, 'Type mismatch in struct field updates: expected a struct but got %s' % (
struct_type))
all_fields_match = True
for field_name, update_type in field_updates:
assert field_name in struct_type.field_map
if update_type != struct_type.field_map[field_name]:
all_fields_match = False
if all_fields_match:
new_struct_type = struct_type
else:
new_field_map = struct_type.field_map.copy()
for field_name, update_type in field_updates:
new_field_map[field_name] = update_type
new_struct_type = types.Struct(None, [], [], set(), new_field_map)
return new_struct_type
@type_check_node.register(ast.FieldUpdates)
def _(node, cx):
return [type_check_node(field_update, cx)
for field_update in node.field_updates]
@type_check_node.register(ast.FieldUpdate)
def _(node, cx):
return (
node.field_name,
type_check_node(node.update_expr, cx).check_read(node.update_expr, cx))
@type_check_node.register(ast.ExprColoring)
def _(node, cx):
region_type = type_check_node(node.region, cx).check_read(node.region, cx)
if not (types.is_region(region_type) or types.is_ispace(region_type)):
raise types.TypeError(node, 'Type mismatch in coloring: expected a region or ispace but got %s' % (
region_type))
return types.Coloring(region_type)
@type_check_node.register(ast.ColoringRegion)
def _(node, cx):
return cx.lookup(node, node.name)
@type_check_node.register(ast.ExprColor)
def _(node, cx):
coloring_type = type_check_node(node.coloring_expr, cx).check_read(node.coloring_expr, cx)
pointer_type = type_check_node(node.pointer_expr, cx).check_read(node.pointer_expr, cx)
color_type = type_check_node(node.color_expr, cx).check_read(node.color_expr, cx)
if not types.is_coloring(coloring_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
0, 'color', 'a coloring', coloring_type))
if types.is_region(coloring_type.region):
expected_pointer_type = types.Pointer(
coloring_type.region.kind.contains_type,
[coloring_type.region])
elif types.is_ispace(coloring_type.region):
expected_pointer_type = coloring_type.region.kind.index_type
else:
assert False
if pointer_type != expected_pointer_type:
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
1, 'color', expected_pointer_type, pointer_type))
if not types.is_int(color_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
2, 'color', types.Int(), color_type))
return coloring_type
@type_check_node.register(ast.ExprUpregion)
def _(node, cx):
region_types = type_check_node(node.regions, cx)
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
for index, region_type in zip(xrange(len(region_types)), region_types):
if not types.is_region(region_type):
raise types.TypeError(node, 'Type mismatch for type argument %s in call to task %s: expected %s but got %s' % (
index, 'upregion', 'a region', region_type))
if not types.is_pointer(expr_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, 'upregion', 'a pointer', expr_type))
for expr_region in expr_type.regions:
subregion = False
for region_type in region_types:
success, failed_request = types.check_constraints(
[types.Constraint(lhs = expr_region, op = types.Constraint.SUBREGION, rhs = region_type)],
cx.constraints)
if success:
subregion = True
break
if not subregion:
raise types.TypeError(node, 'Invalid constraint %s requested in upregion expression' % (
'%s <= %s' % (expr_region, region_type)))
return types.Pointer(expr_type.points_to_type, region_types)
@type_check_node.register(ast.UpregionRegions)
def _(node, cx):
return [type_check_node(region, cx).check_read(region, cx)
for region in node.regions]
@type_check_node.register(ast.UpregionRegion)
def _(node, cx):
return cx.lookup(node, node.name)
@type_check_node.register(ast.ExprDownregion)
def _(node, cx):
region_types = type_check_node(node.regions, cx)
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
for index, region_type in zip(xrange(len(region_types)), region_types):
if not types.is_region(region_type):
raise types.TypeError(node, 'Type mismatch for type argument %s in call to task %s: expected %s but got %s' % (
index, 'downregion', 'a region', region_type))
if not types.is_pointer(expr_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, 'downregion', 'a pointer', expr_type))
return types.Pointer(expr_type.points_to_type, region_types)
@type_check_node.register(ast.DownregionRegions)
def _(node, cx):
return [type_check_node(region, cx).check_read(region, cx)
for region in node.regions]
@type_check_node.register(ast.DownregionRegion)
def _(node, cx):
return cx.lookup(node, node.name)
@type_check_node.register(ast.ExprPack)
def _(node, cx):
declared_kind = type_check_node(node.type, cx)
assert types.is_kind(declared_kind)
declared_type = declared_kind.type
region_types = type_check_node(node.regions, cx)
actual_type = declared_type.instantiate_regions(dict(zip(declared_type.regions, region_types)))
expr_type = type_check_node(node.expr, cx).check_read(node.expr, cx)
if expr_type != actual_type:
raise types.TypeError(node, 'Type mismatch in pack: expected %s but got %s' % (
actual_type, expr_type))
success, failed_request = types.check_constraints(actual_type.constraints, cx.constraints)
if not success:
raise types.TypeError(node, 'Invalid constraint %s requested in pack expression' % (
failed_request))
return declared_type
@type_check_node.register(ast.PackRegions)
def _(node, cx):
return [type_check_node(region, cx) for region in node.regions]
@type_check_node.register(ast.PackRegion)
def _(node, cx):
region_type = cx.lookup(node, node.name)
assert types.is_region(region_type)
return region_type
@type_check_node.register(ast.ExprCall)
def _(node, cx):
fn_type = type_check_node(node.function, cx).check_read(node.function, cx)
assert types.is_function(fn_type)
function_name = node.function.name
arg_types = type_check_node(node.args, cx)
region_map = dict(
[(param, arg)
for param, arg in zip(fn_type.param_types, arg_types)
if (types.is_region(param) and types.is_region(arg))
or (types.is_ispace(param) and types.is_ispace(arg))])
param_types = [t.substitute_regions(region_map) for t in fn_type.param_types]
privileges_requested = [t.substitute_regions(region_map) for t in fn_type.privileges]
return_type = fn_type.return_type.substitute_regions(region_map)
if len(param_types) != len(arg_types):
raise types.TypeError(node, 'Incorrect number of arguments for call to task %s: expected %s but got %s' % (
function_name, len(param_types), len(arg_types)))
for param_type, arg_type, index in zip(param_types, arg_types, xrange(len(param_types))):
if types.is_ispace(param_type):
if not types.is_ispace(arg_type) or param_type.kind != arg_type.kind:
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, function_name, param_type.kind, arg_type))
elif types.is_region(param_type):
# First check that both are regions.
if not types.is_region(arg_type):
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, function_name, param_type.kind, arg_type))
# Then check that the regions contains compatible types.
param_kind = param_type.kind.substitute_regions(region_map)
arg_kind = arg_type.kind
if param_kind != arg_kind:
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, function_name, param_kind, arg_kind))
elif param_type != arg_type:
raise types.TypeError(node, 'Type mismatch for argument %s in call to task %s: expected %s but got %s' % (
index, function_name, param_type, arg_type))
success, failed_request = types.check_privileges(privileges_requested, cx)
if not success:
raise types.TypeError(node, 'Invalid privilege %s requested in call to task %s' % (
failed_request, function_name))
return return_type
@type_check_node.register(ast.Args)
def _(node, cx):
return [type_check_node(arg, cx).check_read(arg, cx)
for arg in node.args]
@type_check_node.register(ast.ExprConstBool)
def _(node, cx):
return types.Bool()
@type_check_node.register(ast.ExprConstDouble)
def _(node, cx):
return types.Double()
@type_check_node.register(ast.ExprConstFloat)
def _(node, cx):
return types.Float()
@type_check_node.register(ast.ExprConstInt)
def _(node, cx):
return types.Int()
@type_check_node.register(ast.ExprConstUInt)
def _(node, cx):
return types.UInt()
def type_check(program, opts):
cx = types.Context(opts)
type_check_node(program, cx)
return cx.type_map, cx.constraints, cx.foreign_types
|
|
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from nose.tools import raises
from menpo.testing import is_same_array
from menpo.image import BooleanImage, MaskedImage, Image
@raises(ValueError)
def test_create_1d_error():
Image(np.ones(1))
def test_image_n_elements():
image = Image(np.ones((3, 10, 10)))
assert(image.n_elements == 3 * 10 * 10)
def test_image_width():
image = Image(np.ones((3, 6, 4)))
assert(image.width == 4)
def test_image_height():
image = Image(np.ones((3, 6, 4)))
assert(image.height == 6)
def test_image_blank():
image = Image(np.zeros((1, 6, 4)))
image_blank = Image.init_blank((6, 4))
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_fill():
image = Image(np.ones((1, 6, 4)) * 7)
image_blank = Image.init_blank((6, 4), fill=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_blank_n_channels():
image = Image(np.zeros((7, 6, 4)))
image_blank = Image.init_blank((6, 4), n_channels=7)
assert(np.all(image_blank.pixels == image.pixels))
def test_image_centre():
pixels = np.ones((1, 10, 20))
image = Image(pixels)
assert(np.all(image.centre() == np.array([5, 10])))
def test_image_str_shape_4d():
pixels = np.ones((1, 10, 20, 11, 12))
image = Image(pixels)
assert(image._str_shape() == '10 x 20 x 11 x 12')
def test_image_str_shape_2d():
pixels = np.ones((1, 10, 20))
image = Image(pixels)
assert(image._str_shape() == '20W x 10H')
def test_image_as_vector():
pixels = np.random.rand(1, 10, 20)
image = Image(pixels)
assert(np.all(image.as_vector() == pixels.ravel()))
def test_image_as_vector_keep_channels():
pixels = np.random.rand(2, 10, 20)
image = Image(pixels)
assert(np.all(image.as_vector(keep_channels=True) ==
pixels.reshape([2, -1])))
def test_image_from_vector():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel())
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_custom_channels():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(3, 10, 20)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3)
assert(np.all(image2.pixels == pixels2))
def test_image_from_vector_no_copy():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), copy=False)
assert(is_same_array(image2.pixels, pixels2))
def test_image_from_vector_inplace_no_copy():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=False)
assert(is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_no_copy_warning():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector_inplace(pixels2.ravel()[::-1], copy=False)
assert len(w) == 1
def test_image_from_vector_inplace_copy_default():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel())
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_inplace_copy_explicit():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(2, 10, 20)
image = Image(pixels)
image.from_vector_inplace(pixels2.ravel(), copy=True)
assert(not is_same_array(image.pixels, pixels2))
def test_image_from_vector_custom_channels_no_copy():
pixels = np.random.rand(2, 10, 20)
pixels2 = np.random.rand(3, 10, 20)
image = Image(pixels)
image2 = image.from_vector(pixels2.ravel(), n_channels=3, copy=False)
assert(is_same_array(image2.pixels, pixels2))
@raises(ValueError)
def test_boolean_image_wrong_round():
BooleanImage.init_blank((12, 12), round='ads')
def test_boolean_image_proportion_true():
image = BooleanImage.init_blank((10, 10))
image.pixels[0, :7] = False
assert(image.proportion_true() == 0.3)
def test_boolean_image_proportion_false():
image = BooleanImage.init_blank((10, 10))
image.pixels[0, :7] = False
assert(image.proportion_false() == 0.7)
def test_boolean_image_proportion_sums():
image = BooleanImage.init_blank((10, 10))
image.pixels[0, :7] = False
assert(image.proportion_true() + image.proportion_false() == 1)
def test_boolean_image_false_indices():
image = BooleanImage.init_blank((2, 3))
image.pixels[0, 0, 1] = False
image.pixels[0, 1, 2] = False
assert(np.all(image.false_indices() == np.array([[0, 1],
[1, 2]])))
def test_boolean_image_str():
image = BooleanImage.init_blank((2, 3))
assert(image.__str__() == '3W x 2H 2D mask, 100.0% of which is True')
def test_boolean_image_from_vector():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.init_blank((4, 4))
image2 = image.from_vector(vector)
assert(np.all(image2.as_vector() == vector))
def test_boolean_image_from_vector_no_copy():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.init_blank((4, 4))
image2 = image.from_vector(vector, copy=False)
assert(is_same_array(image2.pixels.ravel(), vector))
def test_boolean_image_from_vector_no_copy_raises():
vector = np.zeros(16, dtype=np.bool)
image = BooleanImage.init_blank((4, 4))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
image.from_vector(vector[::-1], copy=False)
assert len(w) == 1
def test_boolean_image_invert_inplace():
image = BooleanImage.init_blank((4, 4))
image.invert_inplace()
assert(np.all(~image.pixels))
def test_boolean_image_invert_inplace_double_noop():
image = BooleanImage.init_blank((4, 4))
image.invert_inplace()
image.invert_inplace()
assert(np.all(image.pixels))
def test_boolean_image_invert():
image = BooleanImage.init_blank((4, 4))
image2 = image.invert()
assert(np.all(image.pixels))
assert(np.all(~image2.pixels))
def test_boolean_bounds_false():
mask = BooleanImage.init_blank((8, 8), fill=True)
mask.pixels[0, 1, 2] = False
mask.pixels[0, 5, 4] = False
mask.pixels[0, 3:2, 3] = False
min_b, max_b = mask.bounds_false()
assert(np.all(min_b == np.array([1, 2])))
assert(np.all(max_b == np.array([5, 4])))
@raises(TypeError)
def test_boolean_prevent_order_kwarg():
mask = BooleanImage.init_blank((8, 8), fill=True)
mask.warp_to_mask(mask, None, order=4)
def test_create_image_copy_false():
pixels = np.ones((1, 100, 100))
image = Image(pixels, copy=False)
assert (is_same_array(image.pixels, pixels))
def test_create_image_copy_true():
pixels = np.ones((1, 100, 100))
image = Image(pixels)
assert (not is_same_array(image.pixels, pixels))
def test_create_image_copy_false_not_c_contiguous():
pixels = np.ones((1, 100, 100), order='F')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Image(pixels, copy=False)
assert(len(w) == 1)
def mask_image_3d_test():
mask_shape = (13, 120, 121)
mask_region = np.ones(mask_shape)
return BooleanImage(mask_region)
def test_mask_creation_basics():
mask_shape = (3, 120, 121)
mask_region = np.ones(mask_shape)
mask = BooleanImage(mask_region)
assert_equal(mask.n_channels, 1)
assert_equal(mask.n_dims, 3)
assert_equal(mask.shape, mask_shape)
def test_mask_blank():
mask = BooleanImage.init_blank((3, 56, 12))
assert (np.all(mask.pixels))
def test_boolean_copy_false_boolean():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask, copy=False)
assert (is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_true():
mask = np.zeros((10, 10), dtype=np.bool)
boolean_image = BooleanImage(mask)
assert (not is_same_array(boolean_image.pixels, mask))
def test_boolean_copy_false_non_boolean():
mask = np.zeros((10, 10))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
BooleanImage(mask, copy=False)
assert(len(w) == 1)
def test_mask_blank_rounding_floor():
mask = BooleanImage.init_blank((56.1, 12.1), round='floor')
assert_allclose(mask.shape, (56, 12))
def test_mask_blank_rounding_ceil():
mask = BooleanImage.init_blank((56.1, 12.1), round='ceil')
assert_allclose(mask.shape, (57, 13))
def test_mask_blank_rounding_round():
mask = BooleanImage.init_blank((56.1, 12.6), round='round')
assert_allclose(mask.shape, (56, 13))
def test_mask_blank_false_fill():
mask = BooleanImage.init_blank((3, 56, 12), fill=False)
assert (np.all(~mask.pixels))
def test_mask_n_true_n_false():
mask = BooleanImage.init_blank((64, 14), fill=False)
assert_equal(mask.n_true(), 0)
assert_equal(mask.n_false(), 64 * 14)
mask.mask[0, 0] = True
mask.mask[9, 13] = True
assert_equal(mask.n_true(), 2)
assert_equal(mask.n_false(), 64 * 14 - 2)
def test_mask_true_indices():
mask = BooleanImage.init_blank((64, 14, 51), fill=False)
mask.mask[0, 2, 5] = True
mask.mask[5, 13, 4] = True
true_indices = mask.true_indices()
true_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(true_indices, true_indices_test)
def test_mask_false_indices():
mask = BooleanImage.init_blank((64, 14, 51), fill=True)
mask.mask[0, 2, 5] = False
mask.mask[5, 13, 4] = False
false_indices = mask.false_indices()
false_indices_test = np.array([[0, 2, 5], [5, 13, 4]])
assert_equal(false_indices, false_indices_test)
def test_mask_true_bounding_extent():
mask = BooleanImage.init_blank((64, 14, 51), fill=False)
mask.mask[0, 13, 5] = True
mask.mask[5, 2, 4] = True
tbe = mask.bounds_true()
true_extends_mins = np.array([0, 2, 4])
true_extends_maxs = np.array([5, 13, 5])
assert_equal(tbe[0], true_extends_mins)
assert_equal(tbe[1], true_extends_maxs)
def test_3channel_image_creation():
pixels = np.ones((3, 120, 120))
MaskedImage(pixels)
def test_no_channels_image_creation():
pixels = np.ones((120, 120))
MaskedImage(pixels)
def test_create_MaskedImage_copy_false_mask_array():
pixels = np.ones((1, 100, 100))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_false_mask_BooleanImage():
pixels = np.ones((1, 100, 100))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=False)
assert (is_same_array(image.pixels, pixels))
assert (is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_array():
pixels = np.ones((100, 100))
mask = np.ones((100, 100), dtype=np.bool)
image = MaskedImage(pixels, mask=mask)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_create_MaskedImage_copy_true_mask_BooleanImage():
pixels = np.ones((1, 100, 100))
mask = np.ones((100, 100), dtype=np.bool)
mask_image = BooleanImage(mask, copy=False)
image = MaskedImage(pixels, mask=mask_image, copy=True)
assert (not is_same_array(image.pixels, pixels))
assert (not is_same_array(image.mask.pixels, mask))
def test_2d_crop_without_mask():
pixels = np.ones((3, 120, 120))
im = MaskedImage(pixels)
cropped_im = im.crop([10, 50], [20, 60])
assert (cropped_im.shape == (10, 10))
assert (cropped_im.n_channels == 3)
assert (np.alltrue(cropped_im.shape))
def test_2d_crop_with_mask():
pixels = np.ones((3, 120, 120))
mask = np.zeros_like(pixels[0, ...])
mask[10:100, 20:30] = 1
im = MaskedImage(pixels, mask=mask)
cropped_im = im.crop([0, 0], [20, 60])
assert (cropped_im.shape == (20, 60))
assert (np.alltrue(cropped_im.shape))
def test_normalize_std_image():
pixels = np.ones((3, 120, 120))
pixels[0] = 0.5
pixels[1] = 0.2345
image = Image(pixels)
image.normalize_std_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.std(image.pixels), 1)
def test_normalize_norm_image():
pixels = np.ones((3, 120, 120))
pixels[0] = 0.5
pixels[1] = 0.2345
image = Image(pixels)
image.normalize_norm_inplace()
assert_allclose(np.mean(image.pixels), 0, atol=1e-10)
assert_allclose(np.linalg.norm(image.pixels), 1)
@raises(ValueError)
def test_normalize_std_no_variance_exception():
pixels = np.ones((3, 120, 120))
pixels[0] = 0.5
pixels[1] = 0.2345
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
@raises(ValueError)
def test_normalize_norm_zero_norm_exception():
pixels = np.zeros((3, 120, 120))
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
def test_normalize_std_masked_per_channel():
pixels = np.random.randn(3, 120, 120)
pixels[0] *= 7
pixels[1] += -14
pixels[2] /= 130
image = MaskedImage(pixels)
image.normalize_std_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=1), 1)
def test_normalize_std_image_per_channel():
pixels = np.random.randn(3, 120, 120)
pixels[1] *= 9
pixels[0] += -3
pixels[2] /= 140
image = Image(pixels)
image.normalize_std_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=1), 1)
def test_normalize_norm_image_per_channel():
pixels = np.random.randn(3, 120, 120)
pixels[1] *= 17
pixels[0] += -114
pixels[2] /= 30
image = Image(pixels)
image.normalize_norm_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=1), 1)
def test_normalize_norm_masked_per_channel():
pixels = np.random.randn(3, 120, 120)
pixels[1] *= 7
pixels[0] += -14
pixels[2] /= 130
image = MaskedImage(pixels)
image.normalize_norm_inplace(mode='per_channel')
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=1), 1)
def test_normalize_std_masked():
pixels = np.random.randn(3, 120, 120)
pixels[1] *= 7
pixels[0] += -14
pixels[2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_std_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.std(image.as_vector(keep_channels=True), axis=1), 1)
def test_normalize_norm_masked():
pixels = np.random.randn(3, 120, 120)
pixels[1] *= 7
pixels[0] += -14
pixels[2] /= 130
mask = np.zeros((120, 120))
mask[30:50, 20:30] = 1
image = MaskedImage(pixels, mask=mask)
image.normalize_norm_inplace(mode='per_channel', limit_to_mask=True)
assert_allclose(
np.mean(image.as_vector(keep_channels=True), axis=1), 0, atol=1e-10)
assert_allclose(
np.linalg.norm(image.as_vector(keep_channels=True), axis=1), 1)
def test_rescale_single_num():
image = MaskedImage(np.random.randn(3, 120, 120))
new_image = image.rescale(0.5)
assert_allclose(new_image.shape, (60, 60))
def test_rescale_tuple():
image = MaskedImage(np.random.randn(3, 120, 120))
new_image = image.rescale([0.5, 2.0])
assert_allclose(new_image.shape, (60, 240))
@raises(ValueError)
def test_rescale_negative():
image = MaskedImage(np.random.randn(3, 120, 120))
image.rescale([0.5, -0.5])
@raises(ValueError)
def test_rescale_negative_single_num():
image = MaskedImage(np.random.randn(3, 120, 120))
image.rescale(-0.5)
def test_rescale_boundaries_interpolation():
image = MaskedImage(np.random.randn(3, 60, 60))
for i in [x * 0.1 for x in range(1, 31)]:
image_rescaled = image.rescale(i)
assert_allclose(image_rescaled.mask.proportion_true(), 1.0)
def test_resize():
image = MaskedImage(np.random.randn(3, 120, 120))
new_size = (250, 250)
new_image = image.resize(new_size)
assert_allclose(new_image.shape, new_size)
def test_as_greyscale_luminosity():
ones = np.ones([3, 120, 120])
image = MaskedImage(ones)
image.pixels[0] *= 0.5
new_image = image.as_greyscale(mode='luminosity')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
assert_allclose(new_image.pixels[0], ones[0] * 0.850532)
def test_rolled_channels():
ones = np.ones([3, 120, 120])
image = MaskedImage(ones)
rolled_channels = image.rolled_channels()
assert rolled_channels.shape == (120, 120, 3)
def test_as_greyscale_average():
ones = np.ones([3, 120, 120])
image = MaskedImage(ones)
image.pixels[0] *= 0.5
new_image = image.as_greyscale(mode='average')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
assert_allclose(new_image.pixels[0], ones[0] * 0.83333333)
@raises(ValueError)
def test_as_greyscale_channels_no_index():
image = MaskedImage(np.ones([3, 120, 120]))
new_image = image.as_greyscale(mode='channel')
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
def test_as_greyscale_channels():
image = MaskedImage(np.random.randn(3, 120, 120))
new_image = image.as_greyscale(mode='channel', channel=0)
assert (new_image.shape == image.shape)
assert (new_image.n_channels == 1)
assert_allclose(new_image.pixels[0], image.pixels[0])
def test_as_pil_image_1channel():
im = MaskedImage(np.ones((1, 120, 120)))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
@raises(ValueError)
def test_as_pil_image_bad_range():
im = MaskedImage(np.random.randn(1, 120, 120))
im.as_PILImage()
def test_as_pil_image_float32():
im = MaskedImage(np.ones((1, 120, 120)).astype(np.float32))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_as_pil_image_bool():
im = BooleanImage(np.ones((120, 120), dtype=np.bool))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
im.pixels.astype(np.uint8) * 255)
def test_as_pil_image_uint8():
im = Image(np.ones((120, 120), dtype=np.uint8))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
im.pixels)
def test_as_pil_image_3channels():
im = MaskedImage(np.ones((3, 120, 120)))
new_im = im.as_PILImage()
assert_allclose(np.asarray(new_im.getdata()).reshape(im.pixels.shape),
(im.pixels * 255).astype(np.uint8))
def test_image_extract_channels():
image = Image(np.random.rand(3, 120, 120))
extracted = image.extract_channels(0)
assert_equal(extracted.pixels, image.pixels[[0], ...])
def test_image_extract_channels_multiple():
image = Image(np.random.rand(3, 120, 120))
extracted = image.extract_channels([0, 2])
assert_equal(extracted.pixels[0], image.pixels[0])
assert_equal(extracted.pixels[1], image.pixels[2])
def test_image_extract_channels_multiple_reversed():
image = Image(np.random.rand(3, 120, 120))
extracted = image.extract_channels([2, 0])
assert_equal(extracted.pixels[0], image.pixels[2])
assert_equal(extracted.pixels[1], image.pixels[0])
def test_diagonal_greyscale():
image = Image.init_blank((100, 250), n_channels=1)
assert image.diagonal() == (100 ** 2 + 250 ** 2) ** 0.5
def test_diagonal_color():
image = Image.init_blank((100, 250), n_channels=3)
assert image.diagonal() == (100 ** 2 + 250 ** 2) ** 0.5
def test_diagonal_greyscale_ndim():
image = Image.init_blank((100, 250, 50), n_channels=1)
assert image.diagonal() == (100 ** 2 + 250 ** 2 + 50 ** 2) ** 0.5
def test_diagonal_kchannel_ndim():
image = Image.init_blank((100, 250, 50), n_channels=5)
assert image.diagonal() == (100 ** 2 + 250 ** 2 + 50 ** 2) ** 0.5
def test_rescale_to_diagonal():
image = Image.init_blank((8, 6), n_channels=2)
assert image.diagonal() == 10
rescaled = image.rescale_to_diagonal(5)
assert rescaled.shape == (4, 3)
assert rescaled.n_channels == 2
|
|
from ConfigParser import ConfigParser
import errno
import logging
import os
class AirflowConfigException(Exception):
pass
defaults = {
'core': {
'unit_test_mode': False,
'parallelism': 32,
'load_examples': True,
'plugins_folder': None,
},
'webserver': {
'base_url': 'http://localhost:8080',
'web_server_host': '0.0.0.0',
'web_server_port': '8080',
'authenticate': False,
'demo_mode': False,
'secret_key': 'airflowified',
'expose_config': False,
},
'scheduler': {
'statsd_on': False,
'statsd_host': 'localhost',
'statsd_port': 8125,
'statsd_prefix': 'airflow',
'job_heartbeat_sec': 5,
'scheduler_heartbeat_sec': 60,
'authenticate': False,
},
'celery': {
'default_queue': 'default',
},
'smtp': {
'smtp_starttls': True,
},
}
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
base_log_folder = {AIRFLOW_HOME}/logs
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# on their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# Whether to load the examples that ship with Airflow. It's good for
# getting started, but you probably want to set this to False in a
# production environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
[webserver]
# The base url of your website, as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Secret key used to run your flask app
secret_key = temporary_key
# Expose the configuration file in the web server
expose_config = true
[smtp]
# If you want airflow to send emails on retries and failures, and you want
# to use the airflow.utils.send_email function, you have to configure an
# smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# the [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and
# visible from the main web server so it can connect to the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it: `airflow flower`. This defines the port that Celery Flower runs on
flower_port = 8383
# Default queue that tasks get assigned to and that workers listen on.
default_queue = default
[scheduler]
# Task instances listen for an external kill signal (when you clear tasks
# from the CLI or the UI); this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
"""
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {AIRFLOW_HOME}/dags
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
"""
class ConfigParserWithDefaults(ConfigParser):
def __init__(self, defaults, *args, **kwargs):
self.defaults = defaults
ConfigParser.__init__(self, *args, **kwargs)
def get(self, section, key):
section = str(section).lower()
key = str(key).lower()
d = self.defaults
try:
return ConfigParser.get(self, section, key)
except Exception:
if section not in d or key not in d[section]:
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
else:
return d[section][key]
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val == "true":
return True
elif val == "false":
return False
else:
raise AirflowConfigException("Not a boolean.")
def getint(self, section, key):
return int(self.get(section, key))
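# Illustrative sketch (not part of the original module): how the defaults
# fallback in ConfigParserWithDefaults above is expected to behave. The
# section/key values and the _demo_defaults_fallback name are hypothetical
# examples only; the function is defined but never called here.
def _demo_defaults_fallback():
    demo_defaults = {'core': {'parallelism': '32', 'unit_test_mode': 'False'}}
    parser = ConfigParserWithDefaults(demo_defaults)
    # No config file has been read, so every lookup falls back to the dict.
    assert parser.get('core', 'parallelism') == '32'
    assert parser.getint('core', 'parallelism') == 32
    assert parser.getboolean('core', 'unit_test_mode') is False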
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise AirflowConfigException('Had trouble creating a directory: ' + path)
"""
Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
"~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
"""
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = os.path.expanduser('~/airflow')
else:
AIRFLOW_HOME = os.path.expanduser(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(os.path.expanduser('~/airflow.cfg')):
AIRFLOW_CONFIG = os.path.expanduser('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = os.environ['AIRFLOW_CONFIG']
if not os.path.isfile(AIRFLOW_CONFIG):
"""
These configuration options are used to generate a default configuration
when it is missing. The right way to change your configuration is to alter
your configuration file, not this code.
"""
logging.info("Creating new config file in: " + AIRFLOW_CONFIG)
with open(AIRFLOW_CONFIG, 'w') as f:
f.write(DEFAULT_CONFIG.format(**locals()))
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
logging.info("Creating new config file in: " + TEST_CONFIG_FILE)
with open(TEST_CONFIG_FILE, 'w') as f:
f.write(TEST_CONFIG.format(**locals()))
logging.info("Reading the config from " + AIRFLOW_CONFIG)
def test_mode():
# Switch the module-level conf object to the unit-test configuration file.
global conf
conf = ConfigParserWithDefaults(defaults)
conf.read(TEST_CONFIG_FILE)
conf = ConfigParserWithDefaults(defaults)
conf.read(AIRFLOW_CONFIG)
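# Minimal usage sketch (an assumption, not part of the original file): the
# rest of airflow is expected to read settings through the module-level
# ``conf`` object created above. The keys below come from DEFAULT_CONFIG;
# _demo_read_settings itself is a hypothetical, uncalled helper.
def _demo_read_settings():
    parallelism = conf.getint('core', 'parallelism')
    expose_config = conf.getboolean('webserver', 'expose_config')
    default_queue = conf.get('celery', 'default_queue')
    return parallelism, expose_config, default_queue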
|
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import unittest
from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError
import azurelinuxagent.common.utils.restutil as restutil
from azurelinuxagent.common.utils.restutil import HTTP_USER_AGENT
from azurelinuxagent.common.future import httpclient, ustr
from tests.tools import AgentTestCase, call, Mock, MagicMock, patch
class TestIOErrorCounter(AgentTestCase):
def test_increment_hostplugin(self):
restutil.IOErrorCounter.reset()
restutil.IOErrorCounter.set_protocol_endpoint()
restutil.IOErrorCounter.increment(
restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
counts = restutil.IOErrorCounter.get_and_reset()
self.assertEqual(1, counts["hostplugin"])
self.assertEqual(0, counts["protocol"])
self.assertEqual(0, counts["other"])
def test_increment_protocol(self):
restutil.IOErrorCounter.reset()
restutil.IOErrorCounter.set_protocol_endpoint()
restutil.IOErrorCounter.increment(
restutil.KNOWN_WIRESERVER_IP, 80)
counts = restutil.IOErrorCounter.get_and_reset()
self.assertEqual(0, counts["hostplugin"])
self.assertEqual(1, counts["protocol"])
self.assertEqual(0, counts["other"])
def test_increment_other(self):
restutil.IOErrorCounter.reset()
restutil.IOErrorCounter.set_protocol_endpoint()
restutil.IOErrorCounter.increment(
'169.254.169.254', 80)
counts = restutil.IOErrorCounter.get_and_reset()
self.assertEqual(0, counts["hostplugin"])
self.assertEqual(0, counts["protocol"])
self.assertEqual(1, counts["other"])
def test_get_and_reset(self):
restutil.IOErrorCounter.reset()
restutil.IOErrorCounter.set_protocol_endpoint()
restutil.IOErrorCounter.increment(
restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
restutil.IOErrorCounter.increment(
restutil.KNOWN_WIRESERVER_IP, restutil.HOST_PLUGIN_PORT)
restutil.IOErrorCounter.increment(
restutil.KNOWN_WIRESERVER_IP, 80)
restutil.IOErrorCounter.increment(
'169.254.169.254', 80)
restutil.IOErrorCounter.increment(
'169.254.169.254', 80)
counts = restutil.IOErrorCounter.get_and_reset()
self.assertEqual(2, counts.get("hostplugin"))
self.assertEqual(1, counts.get("protocol"))
self.assertEqual(2, counts.get("other"))
self.assertEqual(
{"hostplugin":0, "protocol":0, "other":0},
restutil.IOErrorCounter._counts)
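# Hedged sketch (illustrative only, not azurelinuxagent's implementation):
# the classification rule the tests above exercise. The endpoint literals
# and the helper name are assumptions inferred from the test inputs.
def _classify_io_error(host, port,
                       wireserver_ip='168.63.129.16',
                       host_plugin_port=32526):
    if host == wireserver_ip and port == host_plugin_port:
        return 'hostplugin'
    if host == wireserver_ip:
        return 'protocol'
    return 'other'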
class TestHttpOperations(AgentTestCase):
def test_parse_url(self):
test_uri = "http://abc.def/ghi#hash?jkl=mn"
host, port, secure, rel_uri = restutil._parse_url(test_uri)
self.assertEquals("abc.def", host)
self.assertEquals("/ghi#hash?jkl=mn", rel_uri)
test_uri = "http://abc.def/"
host, port, secure, rel_uri = restutil._parse_url(test_uri)
self.assertEquals("abc.def", host)
self.assertEquals("/", rel_uri)
self.assertEquals(False, secure)
test_uri = "https://abc.def/ghi?jkl=mn"
host, port, secure, rel_uri = restutil._parse_url(test_uri)
self.assertEquals(True, secure)
test_uri = "http://abc.def:80/"
host, port, secure, rel_uri = restutil._parse_url(test_uri)
self.assertEquals("abc.def", host)
host, port, secure, rel_uri = restutil._parse_url("")
self.assertEquals(None, host)
self.assertEquals(rel_uri, "")
host, port, secure, rel_uri = restutil._parse_url("None")
self.assertEquals(None, host)
self.assertEquals(rel_uri, "None")
def test_cleanup_sas_tokens_from_urls_for_normal_cases(self):
test_url = "http://abc.def/ghi#hash?jkl=mn"
filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
self.assertEquals(test_url, filtered_url)
test_url = "http://abc.def:80/"
filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
self.assertEquals(test_url, filtered_url)
test_url = "http://abc.def/"
filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
self.assertEquals(test_url, filtered_url)
test_url = "https://abc.def/ghi?jkl=mn"
filtered_url = restutil.redact_sas_tokens_in_urls(test_url)
self.assertEquals(test_url, filtered_url)
def test_cleanup_sas_tokens_from_urls_containing_sas_tokens(self):
# Contains pair of URLs (RawURL, RedactedURL)
urls_tuples = [("https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig"
"=sXBjML1Fpk9UnTBtajo05ZTFSk0LWFGvARZ6WlVcAog%3D&srt=o&ss=b&"
"spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00%3A21%3A38Z&"
"st=2017-07-01T23%3A16%3A38Z",
"https://abc.def.xyz.123.net/functiontest/yokawasa.png?sig"
"=" + restutil.REDACTED_TEXT +
"&srt=o&ss=b&spr=https&sp=rl&sv=2016-05-31&se=2017-07-01T00"
"%3A21%3A38Z&st=2017-07-01T23%3A16%3A38Z"),
("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
"-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=DavQgRtl99DsEPv9Xeb63GnLXCuaLYw5ay%2BE1cFckQY%3D",
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
"=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=" + restutil.REDACTED_TEXT),
("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
"-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=ttSCKmyjiDEeIzT9q7HtYYgbCRIXuesFSOhNEab52NM%3D",
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
"=2018-07-26T02:20:44Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=" + restutil.REDACTED_TEXT),
("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
"-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=X0imGmcj5KcBPFcqlfYjIZakzGrzONGbRv5JMOnGrwc%3D",
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
"=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=" + restutil.REDACTED_TEXT),
("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
"-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=9hfxYvaZzrMahtGO1OgMUiFGnDOtZXulZ3skkv1eVBg%3D",
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
"=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https,"
"http&sig=" + restutil.REDACTED_TEXT),
("https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se=2018-07"
"-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https"
"&sig=cmluQEHnOGsVK9NDm83ruuPdPWNQcerfjOAbkspNZXU%3D",
"https://abc.def.xyz.123.net/?sv=2017-11-09&ss=b&srt=o&sp=r&se"
"=2018-07-26T02:20:42Z&st=2018-07-25T18:20:44Z&spr=https&sig"
"=" + restutil.REDACTED_TEXT)
]
for x in urls_tuples:
self.assertEquals(restutil.redact_sas_tokens_in_urls(x[0]), x[1])
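# Hedged sketch (illustrative only, not the agent's implementation): the
# redaction exercised above can be reproduced by replacing the value of the
# 'sig' query parameter, e.g.
#     import re
#     def _redact_sig(url, replacement):
#         return re.sub(r'(?<=[?&]sig=)[^&]+', replacement, url)
# The helper name and regex are assumptions; only the 'sig' value changes in
# the expected outputs above.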
@patch('azurelinuxagent.common.conf.get_httpproxy_port')
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_none_is_default(self, mock_host, mock_port):
mock_host.return_value = None
mock_port.return_value = None
h, p = restutil._get_http_proxy()
self.assertEqual(None, h)
self.assertEqual(None, p)
@patch('azurelinuxagent.common.conf.get_httpproxy_port')
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_configuration_overrides_env(self, mock_host, mock_port):
mock_host.return_value = "host"
mock_port.return_value = None
h, p = restutil._get_http_proxy()
self.assertEqual("host", h)
self.assertEqual(None, p)
self.assertEqual(1, mock_host.call_count)
self.assertEqual(1, mock_port.call_count)
@patch('azurelinuxagent.common.conf.get_httpproxy_port')
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_configuration_requires_host(self, mock_host, mock_port):
mock_host.return_value = None
mock_port.return_value = None
h, p = restutil._get_http_proxy()
self.assertEqual(None, h)
self.assertEqual(None, p)
self.assertEqual(1, mock_host.call_count)
self.assertEqual(0, mock_port.call_count)
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_http_uses_httpproxy(self, mock_host):
mock_host.return_value = None
with patch.dict(os.environ, {
'http_proxy' : 'http://foo.com:80',
'https_proxy' : 'https://bar.com:443'
}):
h, p = restutil._get_http_proxy()
self.assertEqual("foo.com", h)
self.assertEqual(80, p)
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_https_uses_httpsproxy(self, mock_host):
mock_host.return_value = None
with patch.dict(os.environ, {
'http_proxy' : 'http://foo.com:80',
'https_proxy' : 'https://bar.com:443'
}):
h, p = restutil._get_http_proxy(secure=True)
self.assertEqual("bar.com", h)
self.assertEqual(443, p)
@patch('azurelinuxagent.common.conf.get_httpproxy_host')
def test_get_http_proxy_ignores_user_in_httpproxy(self, mock_host):
mock_host.return_value = None
with patch.dict(os.environ, {
'http_proxy' : 'http://user:pw@foo.com:80'
}):
h, p = restutil._get_http_proxy()
self.assertEqual("foo.com", h)
self.assertEqual(80, p)
def test_get_no_proxy_with_values_set(self):
no_proxy_list = ["foo.com", "www.google.com"]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
no_proxy_from_environment = restutil.get_no_proxy()
self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment))
for i, j in zip(no_proxy_from_environment, no_proxy_list):
self.assertEqual(i, j)
def test_get_no_proxy_with_incorrect_variable_set(self):
no_proxy_list = ["foo.com", "www.google.com", "", ""]
no_proxy_list_cleaned = [entry for entry in no_proxy_list if entry]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
no_proxy_from_environment = restutil.get_no_proxy()
self.assertEquals(len(no_proxy_list_cleaned), len(no_proxy_from_environment))
for i, j in zip(no_proxy_from_environment, no_proxy_list_cleaned):
print(i, j)
self.assertEqual(i, j)
def test_get_no_proxy_with_ip_addresses_set(self):
no_proxy_var = "10.0.0.1,10.0.0.2,10.0.0.3,10.0.0.4,10.0.0.5,10.0.0.6,10.0.0.7,10.0.0.8,10.0.0.9,10.0.0.10,"
no_proxy_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5',
'10.0.0.6', '10.0.0.7', '10.0.0.8', '10.0.0.9', '10.0.0.10']
with patch.dict(os.environ, {
'no_proxy': no_proxy_var
}):
no_proxy_from_environment = restutil.get_no_proxy()
self.assertEquals(len(no_proxy_list), len(no_proxy_from_environment))
for i, j in zip(no_proxy_from_environment, no_proxy_list):
self.assertEqual(i, j)
def test_get_no_proxy_default(self):
no_proxy_generator = restutil.get_no_proxy()
self.assertIsNone(no_proxy_generator)
def test_is_ipv4_address(self):
self.assertTrue(restutil.is_ipv4_address('8.8.8.8'))
self.assertFalse(restutil.is_ipv4_address('localhost.localdomain'))
self.assertFalse(restutil.is_ipv4_address('2001:4860:4860::8888')) # ipv6 tests
def test_is_valid_cidr(self):
self.assertTrue(restutil.is_valid_cidr('192.168.1.0/24'))
self.assertFalse(restutil.is_valid_cidr('8.8.8.8'))
self.assertFalse(restutil.is_valid_cidr('192.168.1.0/a'))
self.assertFalse(restutil.is_valid_cidr('192.168.1.0/128'))
self.assertFalse(restutil.is_valid_cidr('192.168.1.0/-1'))
self.assertFalse(restutil.is_valid_cidr('192.168.1.999/24'))
def test_address_in_network(self):
self.assertTrue(restutil.address_in_network('192.168.1.1', '192.168.1.0/24'))
self.assertFalse(restutil.address_in_network('172.16.0.1', '192.168.1.0/24'))
def test_dotted_netmask(self):
self.assertEquals(restutil.dotted_netmask(0), '0.0.0.0')
self.assertEquals(restutil.dotted_netmask(8), '255.0.0.0')
self.assertEquals(restutil.dotted_netmask(16), '255.255.0.0')
self.assertEquals(restutil.dotted_netmask(24), '255.255.255.0')
self.assertEquals(restutil.dotted_netmask(32), '255.255.255.255')
self.assertRaises(ValueError, restutil.dotted_netmask, 33)
def test_bypass_proxy(self):
no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16", "Microsoft.com"]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
self.assertFalse(restutil.bypass_proxy("http://bar.com"))
self.assertTrue(restutil.bypass_proxy("http://foo.com"))
self.assertTrue(restutil.bypass_proxy("http://168.63.129.16"))
self.assertFalse(restutil.bypass_proxy("http://baz.com"))
self.assertFalse(restutil.bypass_proxy("http://10.1.1.1"))
self.assertTrue(restutil.bypass_proxy("http://www.microsoft.com"))
@patch("azurelinuxagent.common.future.httpclient.HTTPSConnection")
@patch("azurelinuxagent.common.future.httpclient.HTTPConnection")
def test_http_request_direct(self, HTTPConnection, HTTPSConnection):
mock_conn = \
MagicMock(getresponse=\
Mock(return_value=\
Mock(read=Mock(return_value="TheResults"))))
HTTPConnection.return_value = mock_conn
resp = restutil._http_request("GET", "foo", "/bar")
HTTPConnection.assert_has_calls([
call("foo", 80, timeout=10)
])
HTTPSConnection.assert_not_called()
mock_conn.request.assert_has_calls([
call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
self.assertEquals("TheResults", resp.read())
@patch("azurelinuxagent.common.future.httpclient.HTTPSConnection")
@patch("azurelinuxagent.common.future.httpclient.HTTPConnection")
def test_http_request_direct_secure(self, HTTPConnection, HTTPSConnection):
mock_conn = \
MagicMock(getresponse=\
Mock(return_value=\
Mock(read=Mock(return_value="TheResults"))))
HTTPSConnection.return_value = mock_conn
resp = restutil._http_request("GET", "foo", "/bar", secure=True)
HTTPConnection.assert_not_called()
HTTPSConnection.assert_has_calls([
call("foo", 443, timeout=10)
])
mock_conn.request.assert_has_calls([
call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
self.assertEquals("TheResults", resp.read())
@patch("azurelinuxagent.common.future.httpclient.HTTPSConnection")
@patch("azurelinuxagent.common.future.httpclient.HTTPConnection")
def test_http_request_proxy(self, HTTPConnection, HTTPSConnection):
mock_conn = \
MagicMock(getresponse=\
Mock(return_value=\
Mock(read=Mock(return_value="TheResults"))))
HTTPConnection.return_value = mock_conn
resp = restutil._http_request("GET", "foo", "/bar",
proxy_host="foo.bar", proxy_port=23333)
HTTPConnection.assert_has_calls([
call("foo.bar", 23333, timeout=10)
])
HTTPSConnection.assert_not_called()
mock_conn.request.assert_has_calls([
call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
self.assertEquals("TheResults", resp.read())
@patch("azurelinuxagent.common.utils.restutil._get_http_proxy")
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_proxy_with_no_proxy_check(self, _http_request, sleep, mock_get_http_proxy):
mock_http_resp = MagicMock()
mock_http_resp.read = Mock(return_value="hehe")
_http_request.return_value = mock_http_resp
mock_get_http_proxy.return_value = "host", 1234 # Return a host/port combination
no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
# Test http get: foo.com is in no_proxy, so the proxy lookup is skipped
resp = restutil.http_get("http://foo.com", use_proxy=True)
self.assertEquals("hehe", resp.read())
self.assertEquals(0, mock_get_http_proxy.call_count)
# Test http get: bar.com is not in no_proxy, so the proxy is resolved and used
resp = restutil.http_get("http://bar.com", use_proxy=True)
self.assertEquals("hehe", resp.read())
self.assertEquals(1, mock_get_http_proxy.call_count)
def test_proxy_conditions_with_no_proxy(self):
should_use_proxy = True
should_not_use_proxy = False
use_proxy = True
no_proxy_list = ["foo.com", "www.google.com", "168.63.129.16"]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
host = "10.0.0.1"
self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "foo.com"
self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "www.google.com"
self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "168.63.129.16"
self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "www.bar.com"
self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host))
no_proxy_list = ["10.0.0.1/24"]
with patch.dict(os.environ, {
'no_proxy': ",".join(no_proxy_list)
}):
host = "www.bar.com"
self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "10.0.0.1"
self.assertEquals(should_not_use_proxy, use_proxy and not restutil.bypass_proxy(host))
host = "10.0.1.1"
self.assertEquals(should_use_proxy, use_proxy and not restutil.bypass_proxy(host))
# When no_proxy is empty
with patch.dict(os.environ, {
'no_proxy': ""
}):
host = "10.0.0.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "foo.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "www.google.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "168.63.129.16"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "www.bar.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "10.0.0.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "10.0.1.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
# When os.environ is empty - No global variables defined.
with patch.dict(os.environ, {}):
host = "10.0.0.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "foo.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "www.google.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "168.63.129.16"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "www.bar.com"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "10.0.0.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
host = "10.0.1.1"
self.assertTrue(use_proxy and not restutil.bypass_proxy(host))
@patch("azurelinuxagent.common.future.httpclient.HTTPSConnection")
@patch("azurelinuxagent.common.future.httpclient.HTTPConnection")
def test_http_request_proxy_secure(self, HTTPConnection, HTTPSConnection):
mock_conn = \
MagicMock(getresponse=\
Mock(return_value=\
Mock(read=Mock(return_value="TheResults"))))
HTTPSConnection.return_value = mock_conn
resp = restutil._http_request("GET", "foo", "/bar",
proxy_host="foo.bar", proxy_port=23333,
secure=True)
HTTPConnection.assert_not_called()
HTTPSConnection.assert_has_calls([
call("foo.bar", 23333, timeout=10)
])
mock_conn.request.assert_has_calls([
call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
self.assertEquals("TheResults", resp.read())
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_with_retry(self, _http_request, sleep):
mock_http_resp = MagicMock()
mock_http_resp.read = Mock(return_value="hehe")
_http_request.return_value = mock_http_resp
# Test http get
resp = restutil.http_get("http://foo.bar")
self.assertEquals("hehe", resp.read())
# Test https get
resp = restutil.http_get("https://foo.bar")
self.assertEquals("hehe", resp.read())
# Test http failure
_http_request.side_effect = httpclient.HTTPException("Http failure")
self.assertRaises(restutil.HttpError, restutil.http_get,
"http://foo.bar")
# Test http failure
_http_request.side_effect = IOError("IO failure")
self.assertRaises(restutil.HttpError, restutil.http_get,
"http://foo.bar")
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_status_codes(self, _http_request, _sleep):
_http_request.side_effect = [
Mock(status=httpclient.SERVICE_UNAVAILABLE),
Mock(status=httpclient.OK)
]
restutil.http_get("https://foo.bar")
self.assertEqual(2, _http_request.call_count)
self.assertEqual(1, _sleep.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_passed_status_codes(self, _http_request, _sleep):
# Ensure the code is not part of the standard set
self.assertFalse(httpclient.UNAUTHORIZED in restutil.RETRY_CODES)
_http_request.side_effect = [
Mock(status=httpclient.UNAUTHORIZED),
Mock(status=httpclient.OK)
]
restutil.http_get("https://foo.bar", retry_codes=[httpclient.UNAUTHORIZED])
self.assertEqual(2, _http_request.call_count)
self.assertEqual(1, _sleep.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_with_fibonacci_delay(self, _http_request, _sleep):
# Ensure the code is not a throttle code
self.assertFalse(httpclient.BAD_GATEWAY in restutil.THROTTLE_CODES)
_http_request.side_effect = [
Mock(status=httpclient.BAD_GATEWAY)
for i in range(restutil.DEFAULT_RETRIES)
] + [Mock(status=httpclient.OK)]
restutil.http_get("https://foo.bar",
max_retry=restutil.DEFAULT_RETRIES+1)
self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count)
self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count)
self.assertEqual(
[
call(restutil._compute_delay(i+1, restutil.DELAY_IN_SECONDS))
for i in range(restutil.DEFAULT_RETRIES)],
_sleep.call_args_list)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_with_constant_delay_when_throttled(self, _http_request, _sleep):
# Ensure the code is a throttle code
self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES)
_http_request.side_effect = [
Mock(status=httpclient.SERVICE_UNAVAILABLE)
for i in range(restutil.DEFAULT_RETRIES)
] + [Mock(status=httpclient.OK)]
restutil.http_get("https://foo.bar",
max_retry=restutil.DEFAULT_RETRIES+1)
self.assertEqual(restutil.DEFAULT_RETRIES+1, _http_request.call_count)
self.assertEqual(restutil.DEFAULT_RETRIES, _sleep.call_count)
self.assertEqual(
[call(1) for i in range(restutil.DEFAULT_RETRIES)],
_sleep.call_args_list)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_for_safe_minimum_number_when_throttled(self, _http_request, _sleep):
# Ensure the code is a throttle code
self.assertTrue(httpclient.SERVICE_UNAVAILABLE in restutil.THROTTLE_CODES)
_http_request.side_effect = [
Mock(status=httpclient.SERVICE_UNAVAILABLE)
for i in range(restutil.THROTTLE_RETRIES-1)
] + [Mock(status=httpclient.OK)]
restutil.http_get("https://foo.bar",
max_retry=1)
self.assertEqual(restutil.THROTTLE_RETRIES, _http_request.call_count)
self.assertEqual(restutil.THROTTLE_RETRIES-1, _sleep.call_count)
self.assertEqual(
[call(1) for i in range(restutil.THROTTLE_RETRIES-1)],
_sleep.call_args_list)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_raises_for_resource_gone(self, _http_request, _sleep):
_http_request.side_effect = [
Mock(status=httpclient.GONE)
]
self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar")
self.assertEqual(1, _http_request.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_raises_for_invalid_container_configuration(self, _http_request, _sleep):
def read():
return b'{ "errorCode": "InvalidContainerConfiguration", "message": "Invalid request." }'
_http_request.side_effect = [
Mock(status=httpclient.BAD_REQUEST, reason='Bad Request', read=read)
]
self.assertRaises(InvalidContainerError, restutil.http_get, "https://foo.bar")
self.assertEqual(1, _http_request.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_raises_for_invalid_role_configuration(self, _http_request, _sleep):
def read():
return b'{ "errorCode": "RequestRoleConfigFileNotFound", "message": "Invalid request." }'
_http_request.side_effect = [
Mock(status=httpclient.GONE, reason='Resource Gone', read=read)
]
self.assertRaises(ResourceGoneError, restutil.http_get, "https://foo.bar")
self.assertEqual(1, _http_request.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_exceptions(self, _http_request, _sleep):
# Testing each exception is difficult because they have varying
# signatures; for now, test one and ensure the set is unchanged
recognized_exceptions = [
httpclient.NotConnected,
httpclient.IncompleteRead,
httpclient.ImproperConnectionState,
httpclient.BadStatusLine
]
self.assertEqual(recognized_exceptions, restutil.RETRY_EXCEPTIONS)
_http_request.side_effect = [
httpclient.IncompleteRead(''),
Mock(status=httpclient.OK)
]
restutil.http_get("https://foo.bar")
self.assertEqual(2, _http_request.call_count)
self.assertEqual(1, _sleep.call_count)
@patch("time.sleep")
@patch("azurelinuxagent.common.utils.restutil._http_request")
def test_http_request_retries_ioerrors(self, _http_request, _sleep):
ioerror = IOError()
ioerror.errno = 42
_http_request.side_effect = [
ioerror,
Mock(status=httpclient.OK)
]
restutil.http_get("https://foo.bar")
self.assertEqual(2, _http_request.call_count)
self.assertEqual(1, _sleep.call_count)
def test_request_failed(self):
self.assertTrue(restutil.request_failed(None))
resp = Mock()
for status in restutil.OK_CODES:
resp.status = status
self.assertFalse(restutil.request_failed(resp))
self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES)
resp.status = httpclient.BAD_REQUEST
self.assertTrue(restutil.request_failed(resp))
self.assertFalse(
restutil.request_failed(
resp, ok_codes=[httpclient.BAD_REQUEST]))
def test_request_succeeded(self):
self.assertFalse(restutil.request_succeeded(None))
resp = Mock()
for status in restutil.OK_CODES:
resp.status = status
self.assertTrue(restutil.request_succeeded(resp))
self.assertFalse(httpclient.BAD_REQUEST in restutil.OK_CODES)
resp.status = httpclient.BAD_REQUEST
self.assertFalse(restutil.request_succeeded(resp))
self.assertTrue(
restutil.request_succeeded(
resp, ok_codes=[httpclient.BAD_REQUEST]))
def test_read_response_error(self):
"""
Validate the read_response_error method handles encoding correctly
"""
responses = ['message', b'message', '\x80message\x80']
response = MagicMock()
response.status = 'status'
response.reason = 'reason'
with patch.object(response, 'read') as patch_response:
for s in responses:
patch_response.return_value = s
result = restutil.read_response_error(response)
print("RESPONSE: {0}".format(s))
print("RESULT: {0}".format(result))
print("PRESENT: {0}".format('[status: reason]' in result))
self.assertTrue('[status: reason]' in result)
self.assertTrue('message' in result)
def test_read_response_bytes(self):
response_bytes = '7b:0a:20:20:20:20:22:65:72:72:6f:72:43:6f:64:65:22:' \
'3a:20:22:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:' \
'69:73:20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:' \
'69:73:20:6f:70:65:72:61:74:69:6f:6e:2e:22:2c:0a:20:' \
'20:20:20:22:6d:65:73:73:61:67:65:22:3a:20:22:c3:af:' \
'c2:bb:c2:bf:3c:3f:78:6d:6c:20:76:65:72:73:69:6f:6e:' \
'3d:22:31:2e:30:22:20:65:6e:63:6f:64:69:6e:67:3d:22:' \
'75:74:66:2d:38:22:3f:3e:3c:45:72:72:6f:72:3e:3c:43:' \
'6f:64:65:3e:49:6e:76:61:6c:69:64:42:6c:6f:62:54:79:' \
'70:65:3c:2f:43:6f:64:65:3e:3c:4d:65:73:73:61:67:65:' \
'3e:54:68:65:20:62:6c:6f:62:20:74:79:70:65:20:69:73:' \
'20:69:6e:76:61:6c:69:64:20:66:6f:72:20:74:68:69:73:' \
'20:6f:70:65:72:61:74:69:6f:6e:2e:0a:52:65:71:75:65:' \
'73:74:49:64:3a:63:37:34:32:39:30:63:62:2d:30:30:30:' \
'31:2d:30:30:62:35:2d:30:36:64:61:2d:64:64:36:36:36:' \
'61:30:30:30:22:2c:0a:20:20:20:20:22:64:65:74:61:69:' \
'6c:73:22:3a:20:22:22:0a:7d'.split(':')
expected_response = '[HTTP Failed] [status: reason] {\n "errorCode": "The blob ' \
'type is invalid for this operation.",\n ' \
'"message": "<?xml version="1.0" ' \
'encoding="utf-8"?>' \
'<Error><Code>InvalidBlobType</Code><Message>The ' \
'blob type is invalid for this operation.\n' \
'RequestId:c74290cb-0001-00b5-06da-dd666a000",' \
'\n "details": ""\n}'
response_string = ''.join(chr(int(b, 16)) for b in response_bytes)
response = MagicMock()
response.status = 'status'
response.reason = 'reason'
with patch.object(response, 'read') as patch_response:
patch_response.return_value = response_string
result = restutil.read_response_error(response)
self.assertEqual(result, expected_response)
try:
raise HttpError("{0}".format(result))
except HttpError as e:
self.assertTrue(result in ustr(e))
if __name__ == '__main__':
unittest.main()
|
|
# pylint: disable-msg=E1101,W0612
import operator
from datetime import datetime
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, bdate_range,
isna, compat, _np_version_under1p12)
from pandas.tseries.offsets import BDay
import pandas.util.testing as tm
from pandas.compat import range
from pandas.core.reshape.util import cartesian_product
import pandas.core.sparse.frame as spf
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.core.sparse.api import SparseSeries
from pandas.tests.series.test_api import SharedWithSparse
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
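# Illustrative sketch (an assumption, not part of the original test module):
# how the two fixture flavours above map onto sparse storage. With the
# default NaN fill_value only non-NaN entries land in sp_values; with
# fill_value=0 the zeros produced by _test_data1_zero are dropped instead.
# _demo_fixture_density is a hypothetical, uncalled helper.
def _demo_fixture_density():
    arr, index = _test_data1()
    nan_based = SparseSeries(arr, index=index, kind='block')
    zarr, zindex = _test_data1_zero()
    zero_based = SparseSeries(zarr, index=zindex, kind='block', fill_value=0)
    # Both keep the same 10 "interesting" values out of the 20 entries.
    assert len(nan_based.sp_values) == len(zero_based.sp_values) == 10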
class TestSparseSeries(SharedWithSparse):
series_klass = SparseSeries
# SharedWithSparse tests use generic, series_klass-agnostic assertion
_assert_series_equal = staticmethod(tm.assert_sp_series_equal)
def setup_method(self, method):
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer',
name='iseries')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_constructor_dict_input(self):
# gh-16905
constructor_dict = {1: 1.}
index = [0, 1, 2]
# Series with index passed in
series = pd.Series(constructor_dict)
expected = SparseSeries(series, index=index)
result = SparseSeries(constructor_dict, index=index)
tm.assert_sp_series_equal(result, expected)
# Series with index and dictionary with no index
expected = SparseSeries(series)
result = SparseSeries(constructor_dict)
tm.assert_sp_series_equal(result, expected)
def test_constructor_dtype(self):
arr = SparseSeries([np.nan, 1, 2, np.nan])
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == np.float64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
assert arr.dtype == np.int64
assert np.isnan(arr.fill_value)
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
tm.assert_sp_series_equal(df['col'], self.bseries, check_names=False)
result = df.iloc[:, 0]
tm.assert_sp_series_equal(result, self.bseries, check_names=False)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
tm.assert_series_equal(expected, result)
def test_constructor_preserve_attr(self):
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == np.int64
assert arr.fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == np.int64
assert s.fill_value == 0
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
assert density == 4 / 10.0
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='bseries'))
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
series = self.bseries.to_dense(sparse_only=True)
indexer = np.isfinite(arr)
exp = Series(arr[indexer], index=index[indexer], name='bseries')
tm.assert_series_equal(series, exp)
series = self.iseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='iseries'))
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='zbseries'))
series = self.ziseries.to_dense()
tm.assert_series_equal(series, Series(arr))
def test_to_dense_fill_value(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([1, np.nan, 0, 3, 0])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
tm.assert_sp_series_equal(bseries, self.bseries)
tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
assert iseries.name == self.bseries.name
assert len(series) == len(bseries)
assert len(series) == len(iseries)
assert series.shape == bseries.shape
assert series.shape == iseries.shape
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
tm.assert_sp_series_equal(zbseries, self.zbseries)
tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
assert ziseries.name == self.zbseries.name
assert len(series) == len(zbseries)
assert len(series) == len(ziseries)
assert series.shape == zbseries.shape
assert series.shape == ziseries.shape
def test_to_dense_preserve_name(self):
assert (self.bseries.name is not None)
result = self.bseries.to_dense()
assert result.name == self.bseries.name
def test_constructor(self):
# sanity checks on the fixtures built in setup_method
assert np.isnan(self.bseries.fill_value)
assert isinstance(self.bseries.sp_index, BlockIndex)
assert np.isnan(self.iseries.fill_value)
assert isinstance(self.iseries.sp_index, IntIndex)
assert self.zbseries.fill_value == 0
tm.assert_numpy_array_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
def _check_const(sparse, name):
# use passed series name
result = SparseSeries(sparse)
tm.assert_sp_series_equal(result, sparse)
assert sparse.name == name
assert result.name == name
# use passed name
result = SparseSeries(sparse, name='x')
tm.assert_sp_series_equal(result, sparse, check_names=False)
assert result.name == 'x'
_check_const(self.bseries, 'bseries')
_check_const(self.iseries, 'iseries')
_check_const(self.zbseries, 'zbseries')
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
assert isinstance(s5, SparseSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
assert (sp.loc[:99] == data).all()
assert isna(sp.loc[100:]).all()
data = np.nan
sp = SparseSeries(data, np.arange(100))
assert len(sp) == 100
assert sp.shape == (100, )
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
tm.assert_numpy_array_equal(sp_series.values.values, np.array(arr))
assert len(sp_series) == 5
assert sp_series.shape == (5, )
def test_constructor_empty(self):
# see gh-9272
sp = SparseSeries()
assert len(sp.index) == 0
assert sp.shape == (0, )
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
assert cop is not self.bseries
assert cop.sp_index is self.bseries.sp_index
assert cop.dtype == np.float64
cop2 = self.iseries.copy()
tm.assert_sp_series_equal(cop, self.bseries)
tm.assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
assert cop.sp_values[0] == 97
assert self.bseries.sp_values[0] != 97
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
tm.assert_sp_series_equal(zbcop, self.zbseries)
tm.assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
assert (self.bseries.sp_values[:5] == 5).all()
def test_shape(self):
# see gh-10452
assert self.bseries.shape == (20, )
assert self.btseries.shape == (20, )
assert self.iseries.shape == (20, )
assert self.bseries2.shape == (15, )
assert self.iseries2.shape == (15, )
assert self.zbseries2.shape == (15, )
assert self.ziseries2.shape == (15, )
def test_astype(self):
with pytest.raises(ValueError):
self.bseries.astype(np.int64)
def test_astype_all(self):
orig = pd.Series(np.array([1, 2, 3]))
s = SparseSeries(orig)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = s.astype(typ)
assert res.dtype == typ
tm.assert_series_equal(res.to_dense(), orig.astype(typ))
def test_kind(self):
assert self.bseries.kind == 'block'
assert self.iseries.kind == 'integer'
def test_to_frame(self):
# GH 9850
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},
default_fill_value=0)
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)
def test_pickle(self):
def _test_roundtrip(series):
unpickled = tm.round_trip_pickle(series)
tm.assert_sp_series_equal(series, unpickled)
tm.assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
tm.assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
tm.assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
pytest.raises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
pytest.raises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
assert self.bseries.get(len(self.bseries) + 1) is None
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
tm.assert_almost_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
tm.assert_almost_equal(
self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.btseries.set_value(idx, 0)
assert self.btseries[idx] == 0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.iseries.set_value('foobar', 0)
assert self.iseries.index[-1] == 'foobar'
assert self.iseries['foobar'] == 0
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
assert isinstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
tm.assert_sp_series_equal(res, expected)
res = self.bseries[:5]
assert isinstance(res, SparseSeries)
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
assert isinstance(sparse_result, SparseSeries)
tm.assert_almost_equal(dense_result,
sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
pytest.raises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
sp = SparseSeries(np.ones(10) * nan)
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp)
with tm.assert_produces_warning(FutureWarning):
sp.take([1, 5], convert=True)
with tm.assert_produces_warning(FutureWarning):
sp.take([1, 5], convert=False)
def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
if not _np_version_under1p12:
tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
np.take(sp.to_dense(), indices, axis=0))
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, out=np.empty(sp.shape))
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, out=None, mode='clip')
def test_setitem(self):
self.bseries[5] = 7.
assert self.bseries[5] == 7.
def test_setslice(self):
self.bseries[5:10] = 7.
tm.assert_series_equal(self.bseries[5:10].to_dense(),
Series(7., index=range(5, 10),
name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
tm.assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
tm.assert_sp_series_equal(result, self.bseries + self.bseries)
def test_binary_operators(self):
# skipping for now #####
pytest.skip("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
tm.assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, "i%s" % op),
getattr(operator, op))
def test_abs(self):
s = SparseSeries([1, 2, -3], name='x')
expected = SparseSeries([1, 2, 3], name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')
expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,
fill_value=2, name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
tm.assert_sp_series_equal(spsre, seriesre)
tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
tm.assert_sp_series_equal(self.bseries, same_index)
assert same_index is not self.bseries
# corner cases
sp = SparseSeries([], index=[])
# TODO: sp_zero is not used anywhere...remove?
sp_zero = SparseSeries([], index=[], fill_value=0) # noqa
_compare_with_series(sp, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
assert (self.bseries.sp_values != 1.).all()
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
assert reindexed.sp_index is index2
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
tm.assert_almost_equal(expected.values, reindexed.sp_values)
# make sure level argument asserts
# TODO: expected is not used anywhere...remove?
expected = expected.reindex(int_indices2).fillna(fill_value) # noqa
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1,
sparse_index=IntIndex(length, index1),
fill_value=nan)
with tm.assert_raises_regex(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0) # noqa
def test_repr(self):
# TODO: These aren't used
bsrepr = repr(self.bseries) # noqa
isrepr = repr(self.iseries) # noqa
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
assert sparse_result == dense_result
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')
tm.assert_sp_array_equal(sp_valid.values, exp_arr)
tm.assert_index_equal(sp_valid.index, expected.index)
assert len(sp_valid.sp_values) == 2
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
assert not isinstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx, fill_value=np.nan)
# homogenized is only valid with NaN fill values
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert (v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
with tm.assert_raises_regex(TypeError, "NaN fill value"):
spf.homogenize(data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
assert np.isnan(result.fill_value)
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
# 1 / 0 is inf
assert np.isinf(result.fill_value)
def test_fill_value_when_combine_const(self):
# GH12723
s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))
shifted = series.shift(0)
assert shifted is not series
tm.assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=BDay())
_dense_series_compare(series, f)
def test_shift_nan(self):
# GH 12908
orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=0))
def test_shift_dtype(self):
# GH 12908
orig = pd.Series([1, 2, 3, 4], dtype=np.int64)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
sparse = orig.to_sparse(fill_value=np.nan)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=np.nan))
# shift(1) or more span changes dtype to float64
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
def test_shift_dtype_fill_value(self):
# GH 12908
orig = pd.Series([1, 0, 0, 4], dtype=np.int64)
for v in [0, 1, np.nan]:
sparse = orig.to_sparse(fill_value=v)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=v))
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
tm.assert_sp_series_equal(result, result2)
tm.assert_sp_series_equal(result, expected)
class TestSparseHandlingMultiIndexes(object):
def setup_method(self, method):
miindex = pd.MultiIndex.from_product(
[["x", "y"], ["10", "20"]], names=['row-foo', 'row-bar'])
micol = pd.MultiIndex.from_product(
[['a', 'b', 'c'], ["1", "2"]], names=['col-foo', 'col-bar'])
dense_multiindex_frame = pd.DataFrame(
index=miindex, columns=micol).sort_index().sort_index(axis=1)
self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)
def test_to_sparse_preserve_multiindex_names_columns(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
sparse_multiindex_frame = sparse_multiindex_frame.copy()
tm.assert_index_equal(sparse_multiindex_frame.columns,
self.dense_multiindex_frame.columns)
def test_round_trip_preserve_multiindex_names(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()
tm.assert_frame_equal(self.dense_multiindex_frame,
round_trip_multiindex_frame,
check_column_type=True,
check_names=True)
class TestSparseSeriesScipyInteraction(object):
# Issue 8048: add SparseSeries coo methods
def setup_method(self, method):
tm._skip_if_no_scipy()
import scipy.sparse
# SparseSeries inputs used in tests, the tests rely on the order
self.sparse_series = []
s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
self.sparse_series.append(s.to_sparse())
ss = self.sparse_series[0].copy()
ss.index.names = [3, 0, 1, 2]
self.sparse_series.append(ss)
ss = pd.Series([
nan
] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()
for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
ss[k] = v
self.sparse_series.append(ss)
# results used in tests
self.coo_matrices = []
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))
self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],
[(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]
self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]
def test_to_coo_text_names_integer_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_integer_row_levels_sort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1],
'column_levels': [2, 3],
'sort_labels': True}
result = (self.coo_matrices[1], self.ils[1], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B', 'C'],
'column_levels': ['D'],
'sort_labels': False}
result = (self.coo_matrices[2], self.ils[2], self.jls[1])
self._run_test(ss, kwargs, result)
def test_to_coo_integer_names_integer_row_levels_nosort(self):
ss = self.sparse_series[1]
kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
pytest.raises(ValueError, ss.to_coo)
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat([self.sparse_series[0],
self.sparse_series[0]]).to_sparse()
pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
check = self.sparse_series[2]
tm.assert_sp_series_equal(ss, check)
def test_from_coo_nodense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
check = self.sparse_series[2]
check = check.dropna().to_sparse()
tm.assert_sp_series_equal(ss, check)
def test_from_coo_long_repr(self):
# GH 13114
# test it doesn't raise error. Formatting is tested in test_format
tm._skip_if_no_scipy()
import scipy.sparse
sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))
repr(sparse)
def _run_test(self, ss, kwargs, check):
results = ss.to_coo(**kwargs)
self._check_results_to_coo(results, check)
# for every test, also test symmetry property (transpose), switch
# row_levels and column_levels
d = kwargs.copy()
d['row_levels'] = kwargs['column_levels']
d['column_levels'] = kwargs['row_levels']
results = ss.to_coo(**d)
results = (results[0].T, results[2], results[1])
self._check_results_to_coo(results, check)
def _check_results_to_coo(self, results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
tm.assert_numpy_array_equal(A.todense(), A_result.todense())
# or compare directly as difference of sparse
# assert(abs(A - A_result).max() < 1e-12) # max is failing in python
# 2.6
assert il == il_result
assert jl == jl_result
def test_concat(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind='integer')
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind='block', fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_sparse_dense(self):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_value_counts(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_dup(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
# numeric op may cause sp_values to include the same value as
# fill_value
dense = pd.Series(vals, name='xx') / 0.
sparse = pd.SparseSeries(vals, name='xx') / 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx') * 0.
sparse = pd.SparseSeries(vals, name='xx') * 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_int(self):
vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
# fill_value is np.nan, but should not be included in the result
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_isna(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isna()
exp = pd.SparseSeries([True, True, False, False, True], name='xxx',
fill_value=True)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isna()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([True, False, False, False, False], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def test_notna(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.notna()
exp = pd.SparseSeries([False, False, True, True, False], name='xxx',
fill_value=False)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.notna()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([False, True, True, True, True], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def _dense_series_compare(s, f):
result = f(s)
assert (isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
tm.assert_series_equal(result.to_dense(), dense_result)
class TestSparseSeriesAnalytics(object):
def setup_method(self, method):
arr, index = _test_data1()
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
def test_cumsum(self):
result = self.bseries.cumsum()
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
axis = 1 # Series is 1-D, so only axis = 0 is valid.
msg = "No axis named {axis}".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
self.bseries.cumsum(axis=axis)
def test_numpy_cumsum(self):
result = np.cumsum(self.bseries)
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = np.cumsum(self.zbseries)
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.bseries, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.zbseries, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
# numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var', 'mean',
'prod', 'cumprod', 'std', 'argsort',
'min', 'max']
for func in funcs:
for series in ('bseries', 'zbseries'):
getattr(np, func)(getattr(self, series))
def test_deprecated_numpy_func_call(self):
# NOTE: These should be added to the 'test_numpy_func_call' test above
# once the behavior of argmin/argmax is corrected.
funcs = ['argmin', 'argmax']
for func in funcs:
for series in ('bseries', 'zbseries'):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(np, func)(getattr(self, series))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(getattr(self, series), func)()
def test_deprecated_reindex_axis(self):
# https://github.com/pandas-dev/pandas/issues/17833
with tm.assert_produces_warning(FutureWarning) as m:
self.bseries.reindex_axis([0, 1, 2])
assert 'reindex' in str(m[0].message)
@pytest.mark.parametrize(
'datetime_type', (np.datetime64,
pd.Timestamp,
lambda x: datetime.strptime(x, '%Y-%m-%d')))
def test_constructor_dict_datetime64_index(datetime_type):
# GH 9456
dates = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
result = SparseSeries(dict(zip(map(datetime_type, dates), values)))
expected = SparseSeries(values, map(pd.Timestamp, dates))
tm.assert_sp_series_equal(result, expected)
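# Illustrative sketch (not part of the original test module): the scipy
# interaction tests above exercise SparseSeries.to_coo/from_coo, which split a
# MultiIndex into row-label and column-label levels. A minimal round trip,
# assuming scipy is installed, looks like the helper below; the name and data
# are for illustration only and nothing in the suite calls it.
def _example_to_coo_round_trip():
    s = pd.Series([3.0, np.nan, 2.0, np.nan],
                  index=pd.MultiIndex.from_tuples(
                      [(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')]))
    ss = s.to_sparse()
    # level 0 becomes the rows, level 1 the columns of the scipy coo_matrix
    A, rows, cols = ss.to_coo(row_levels=[0], column_levels=[1])
    # from_coo rebuilds a SparseSeries from the coo_matrix
    return SparseSeries.from_coo(A), rows, cols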
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
from oslo_config import cfg
from oslo_utils import importutils
from report.openstack.common import service
from report import wsgi
from report import rpc
from report import utils
from report import exception
from report import context
from report.i18n import _, _LE, _LI, _LW
from oslo_log import log as logging
import oslo_messaging as messaging
import os
import sys
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='Enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='A list of APIs with enabled SSL'),
cfg.StrOpt('reportapi_listen',
default="0.0.0.0",
help='The IP address on which the OpenStack API will listen.'),
cfg.IntOpt('reportapi_listen_port',
default=8888,
help='The port on which the OpenStack API will listen.'),
cfg.IntOpt('reportapi_workers',
help='Number of workers for OpenStack API service. The default '
'will be the number of CPUs available.'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='Full class name for the Manager for cert'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
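# Once register_opts() has run, the options above are readable as attributes on
# the global CONF object (e.g. CONF.report_interval, CONF.reportapi_listen,
# CONF.reportapi_listen_port).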
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
its state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, *args, **kwargs):
super(Service, self).__init__()
"""
if not rpc.initialized():
rpc.init(CONF)
"""
self.host = host
self.binary = binary
self.topic = topic
LOG.debug("Topic:" + self.topic + "; Host:" + self.host)
self.manager_class_name = "report.rpt.manager.RptManager"
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
#setup_profiler(binary, host)
def start(self):
"""
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
ctxt = context.get_admin_context()
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug("Creating RPC server for service %s", self.topic)
"""
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
#serializer = objects_base.CinderObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints)
self.rpcserver.start()
"""
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
"""
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
"""
zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone})
self.service_id = service_ref['id']
"""
pass
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to the binary name
:param manager: defaults to report.rpt.manager.RptManager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
topic = binary
if not manager:
"""
subtopic = binary.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
"""
manager = "report.rpt.manager.RptManager"
"""
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
"""
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
"""
try:
self.service_ref.destroy()
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
"""
def stop(self):
try:
self.rpcserver.stop()
self.rpcserver.wait()
except Exception:
pass
"""
try:
self.manager.cleanup_host()
except Exception:
LOG.exception(_LE('Service error occurred during cleanup_host'))
pass
"""
super(Service, self).stop()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
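# NOTE: the basic_config_check defined below redefines (and so shadows) the
# report-interval check defined earlier in this class; only the tempdir check
# below actually runs when __init__ calls basic_config_check().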
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir():
pass
except Exception as e:
LOG.error(_LE('Temporary directory is invalid: %s'), e)
sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % name, 1))
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
LOG.debug("WSGIServer init! name:%s, manager:%s, host:%s, port:%s" %
(self.name, str(self.manager), self.host, str(self.port)))
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def process_launcher():
return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError('serve() can only be called once')
_launcher = service.launch(server, workers=workers)
def wait():
_launcher.wait()
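# Illustrative sketch (not part of the original module): how the helpers above
# are typically wired together in a console entry point. The service name
# 'reportapi' matches the reportapi_listen/reportapi_listen_port options
# registered above; everything else here is an assumption for the example.
if __name__ == '__main__':
    CONF(sys.argv[1:], project='report')
    api = WSGIService('reportapi')
    # serve() hands the server to the launcher exactly once; wait() then
    # blocks until the launcher is stopped.
    serve(api, workers=api.workers or 1)
    wait()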
|
|
import os, sys
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.cifar10
import tflib.inception_score
import tflib.plot
# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
DATA_DIR = 'data/cifar'
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_cifar.py!')
MODE = 'dcgan' # Valid options are dcgan, wgan, or wgan-gp
DIM = 128 # This overfits substantially; you're probably better off with 64
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 8 # Batch size
ITERS = 200000 # How many generator iterations to train for
OUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)
N_NODES = 10
NODES = range(N_NODES)
with open('C3.csv','r') as file:
C=np.loadtxt(file,delimiter=',',dtype=float)
#C = np.ones([N_NODES,N_NODES]) / N_NODES
lib.print_model_settings(locals().copy())
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return LeakyReLU(output)
def Generator(n_samples, noise=None, index = ''):
if noise is None:
noise = tf.random_normal([n_samples, 128])
output = lib.ops.linear.Linear('Generator{}.Input'.format(index), 128, 4*4*4*DIM, noise)
output = lib.ops.batchnorm.Batchnorm('Generator{}.BN1'.format(index), [0], output)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, 4*DIM, 4, 4])
output = lib.ops.deconv2d.Deconv2D('Generator{}.2'.format(index), 4*DIM, 2*DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator{}.BN2'.format(index), [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator{}.3'.format(index), 2*DIM, DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator{}.BN3'.format(index), [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator{}.5'.format(index), DIM, 3, 5, output)
output = tf.tanh(output)
return tf.reshape(output, [-1, OUTPUT_DIM])
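# The Generator above maps a 128-dim noise vector through a linear layer to a
# 4x4 map with 4*DIM channels, then three transposed convolutions
# (4*DIM -> 2*DIM -> DIM -> 3) up to 32x32, with batch norm + ReLU between
# layers and a final tanh, reshaped to OUTPUT_DIM = 3*32*32 values per sample.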
def Discriminator(inputs, index = ''):
output = tf.reshape(inputs, [-1, 3, 32, 32])
output = lib.ops.conv2d.Conv2D('Discriminator{}.1'.format(index), 3, DIM, 5, output, stride=2)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator{}.2'.format(index), DIM, 2*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator{}.BN2'.format(index), [0,2,3], output)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator{}.3'.format(index), 2*DIM, 4*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator{}.BN3'.format(index), [0,2,3], output)
output = LeakyReLU(output)
output = tf.reshape(output, [-1, 4*4*4*DIM])
output = lib.ops.linear.Linear('Discriminator{}.Output'.format(index), 4*4*4*DIM, 1, output)
return tf.reshape(output, [-1])
real_data_int = []
real_data = []
fake_data = []
disc_real = []
disc_fake = []
gen_params = []
disc_params = []
for i in NODES:
real_data_int.append(tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM]))
real_data.append(2*((tf.cast(real_data_int[i], tf.float32)/255.)-.5))
fake_data.append(Generator(BATCH_SIZE, index=i))
disc_real.append(Discriminator(real_data[i], index = i))
disc_fake.append(Discriminator(fake_data[i], index = i))
#gen_params.append(lib.params_with_name('Generator{}'.format(i)))
#disc_params.append(lib.params_with_name('Discriminator{}'.format(i)))
gen_params.append(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator{}'.format(i)))
disc_params.append(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator{}'.format(i)))
gen_cost = []
disc_cost = []
gen_train_op = []
disc_train_op = []
clip_disc_weights = []
for i in NODES:
if MODE == 'wgan':
gen_cost.append( -tf.reduce_mean(disc_fake[i]) )
disc_cost.append( tf.reduce_mean(disc_fake[i]) - tf.reduce_mean(disc_real[i]) )
gen_train_op.append(tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost[i], var_list=gen_params[i]))
disc_train_op.append(tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost[i], var_list=disc_params[i]))
clip_ops = []
for var in disc_params[i]:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights.append(tf.group(*clip_ops))
elif MODE == 'wgan-gp':
# Standard WGAN loss
gen_cost.append( -tf.reduce_mean(disc_fake[i]) )
disc_cost.append( tf.reduce_mean(disc_fake[i]) - tf.reduce_mean(disc_real[i]) )
# Gradient penalty
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
differences = fake_data[i] - real_data[i]
interpolates = real_data[i] + (alpha*differences)
gradients = tf.gradients(Discriminator(interpolates, index=i), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost[i] += LAMBDA*gradient_penalty
gen_train_op.append(tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost[i], var_list=gen_params[i]))
disc_train_op.append(tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost[i], var_list=disc_params[i]))
elif MODE == 'dcgan':
gen_cost.append( tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = disc_fake[i], labels = tf.ones_like(disc_fake[i]))) )
disc_cost.append( tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = disc_fake[i], labels = tf.zeros_like(disc_fake[i]))) )
disc_cost[i] += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = disc_real[i], labels = tf.ones_like(disc_real[i])))
disc_cost[i] /= 2.
gen_train_op.append( tf.train.MomentumOptimizer(learning_rate=2e-4, momentum=0.9, use_nesterov=True).minimize(gen_cost[i],
var_list=lib.params_with_name('Generator{}'.format(i))) )
disc_train_op.append( tf.train.MomentumOptimizer(learning_rate=2e-4, momentum=0.9, use_nesterov=True).minimize(disc_cost[i],
var_list=lib.params_with_name('Discriminator{}'.format(i))) )
# For generating samples
fixed_noise_128 = tf.constant(np.random.normal(size=(128, 128)).astype('float32'))
fixed_noise_samples_128_3 = Generator(128, noise=fixed_noise_128, index=3)
fixed_noise_samples_128_6 = Generator(128, noise=fixed_noise_128, index=6)
def generate_image_3(frame):
samples = session.run(fixed_noise_samples_128_3)
samples = ((samples+1.)*(255./2)).astype('int32')
lib.save_images.save_images(samples.reshape((128, 3, 32, 32)), 'samples_3_{}.png'.format(frame))
def generate_image_6(frame):
samples = session.run(fixed_noise_samples_128_6)
samples = ((samples+1.)*(255./2)).astype('int32')
lib.save_images.save_images(samples.reshape((128, 3, 32, 32)), 'samples_6_{}.png'.format(frame))
# For calculating inception score
#samples_100_array = [Generator(100, index=ind) for ind in NODES]
#def get_inception_score_node(node):
# all_samples = []
# for i in xrange(10):
# all_samples.append(session.run(samples_100_array[node]))
# all_samples = np.concatenate(all_samples, axis=0)
# all_samples = ((all_samples+1.)*(255./2)).astype('int32')
# all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0,2,3,1)
# return lib.inception_score.get_inception_score(list(all_samples))
samples_100 = Generator(100, index = 0)
def get_inception_score():
all_samples = []
for i in xrange(10):
all_samples.append(session.run(samples_100))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples+1.)*(255./2)).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0,2,3,1)
return lib.inception_score.get_inception_score(list(all_samples))
samples_100_3 = Generator(100, index = 3)
def get_inception_score_3():
all_samples = []
for i in xrange(10):
all_samples.append(session.run(samples_100_3))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples+1.)*(255./2)).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0,2,3,1)
return lib.inception_score.get_inception_score(list(all_samples))
samples_100_6 = Generator(100, index = 6)
def get_inception_score_6():
all_samples = []
for i in xrange(10):
all_samples.append(session.run(samples_100_6))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples+1.)*(255./2)).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0,2,3,1)
return lib.inception_score.get_inception_score(list(all_samples))
# Dataset iterators
train_gen = []
dev_gen = []
for nod in NODES:
tt, dd = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR, index=None)
train_gen.append(tt)
dev_gen.append(dd)
def inf_train_gen(nod):
while True:
for images,_ in train_gen[nod]():
yield images
# COMBINATION OP
C_tf = tf.constant(C)
ops = []
for i in NODES:
for o in range(len(gen_params[i])):
ops.append(
tf.assign(gen_params[i][o], tf.add_n( [ C[i,j] * gen_params[j][o] for j in NODES] )
))
for p in range(len(disc_params[i])):
ops.append(
tf.assign(disc_params[i][p], tf.add_n( [ C[i,j] * disc_params[j][p] for j in NODES] )
))
combination_op = tf.group(*ops)
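# Illustrative sketch (not part of the training graph): combination_op above is
# one "diffusion" step that overwrites every node's parameters with a C-weighted
# average of all nodes' parameters (the assigns read the NumPy matrix C
# directly, so the C_tf constant defined earlier goes unused). Because all of
# the assigns run inside a single tf.group, a weighted sum may read a mixture
# of pre- and post-update values from other nodes; computing the sums into
# temporaries before assigning would avoid that. The same update written for
# plain NumPy arrays, with illustrative names, looks like this:
def _numpy_combination_step(param_lists, C_matrix):
    """param_lists[i][k] is the k-th parameter array of node i."""
    n = len(param_lists)
    return [[sum(C_matrix[i, j] * param_lists[j][k] for j in range(n))
             for k in range(len(param_lists[i]))]
            for i in range(n)]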
gen_params_mean = []
disc_params_mean = []
for o in range(len(gen_params[0])):
gen_params_mean.append(
1.0/N_NODES * tf.add_n( [ gen_params[j][o] for j in NODES] )
)
for p in range(len(disc_params[0])):
disc_params_mean.append(
1.0/N_NODES * tf.add_n( [ disc_params[j][p] for j in NODES] )
)
gen_dist = [
tf.reduce_sum( [ tf.reduce_sum( tf.squared_difference(gen_params[i][o],gen_params_mean[o]) ) for o in range(len(gen_params[i]))] ) for i in NODES]
disc_dist = [
tf.reduce_sum( [ tf.reduce_sum( tf.squared_difference(disc_params[i][o],disc_params_mean[o]) ) for o in range(len(disc_params[i]))] ) for i in NODES]
gen_mean_norm = tf.reduce_sum( [ tf.reduce_sum( tf.square(gen_params_mean[o]) ) for o in range(len(gen_params_mean))] )
disc_mean_norm = tf.reduce_sum( [ tf.reduce_sum( tf.square(disc_params_mean[o]) ) for o in range(len(disc_params_mean))] )
saver = tf.train.Saver()
# Train loop
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = []
for nod in NODES:
gen.append(inf_train_gen(nod))
for iteration in xrange(ITERS):
start_time = time.time()
for node in NODES:
# Train generator
if iteration > 0:
_ = session.run(gen_train_op[node])
# Train critic
if MODE == 'dcgan':
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in xrange(disc_iters):
_data = gen[node].next()
_disc_cost, _ = session.run([disc_cost[node], disc_train_op[node]], feed_dict={real_data_int[node]: _data})
if MODE == 'wgan':
_ = session.run(clip_disc_weights[node])
if (iteration % 100 == 99 or iteration < 10):
#lib.plot.plot('NODE {}: train disc cost'.format(node), _disc_cost)
print('iter {} NODE {}: train disc cost : {}, time: {}'.format(iteration,node,_disc_cost,time.time() - start_time) )
#lib.plot.plot('NODE {}: time'.format(node), time.time() - start_time)
#print('iter {} NODE {}: time'.format(iteration,node), time.time() - start_time)
#print('NODE 0',[session.run(gen_params[0][o]).shape for o in range(len(gen_params[0])) ] )
#print('NODE 1',[session.run(gen_params[1][o]).shape for o in range(len(gen_params[1])) ] )
if (iteration <= 500 or iteration % 100 == 99):
dm = session.run(disc_dist)
gm = session.run(gen_dist)
gw = session.run(gen_mean_norm)
dw = session.run(disc_mean_norm)
# IMPORTANT: second position is the norm of the mean!
with open('gen_mean.dat','ab') as file:
file.write(str(iteration)+','+str(gw)+','+','.join([str(g) for g in gm])+'\n')
with open('disc_mean.dat','ab') as file:
file.write(str(iteration)+','+str(dw)+','+','.join([str(d) for d in dm])+'\n')
print('iter {} gen_dists : {}'.format(iteration,gm))
print('iter {} disc_dists : {}'.format(iteration,dm))
session.run(combination_op)
if (iteration % 100 == 99 or iteration < 10):
print('Time of combination: {}'.format(time.time() - start_time) )
#if (iteration % 100 == 99 or iteration < 10):
# #lib.plot.plot('NODE {}: train disc cost'.format(node), _disc_cost)
# print('iter {} NODE {}: train disc cost'.format(iteration,nod), _disc_cost)
# #lib.plot.plot('NODE {}: time'.format(node), time.time() - start_time)
# print('iter {} NODE {}: time'.format(iteration,nod), time.time() - start_time)
# Calculate inception score every 1K iters
if iteration % 1000 == 999:
#inception_score_array = [get_inception_score_node(nod) for nod in NODES]
inception_score_3 = get_inception_score_3()
inception_score_6 = get_inception_score_6()
#lib.plot.plot('NODE 0: inception score', inception_score[0])
#for nnod in NODES:
# print('NODE {}: inception score {}'.format(nnod,inception_score_array[nnod][0]) )
# with open('inception_score_dist_{}.dat'.format(nnod),'ab') as file:
# file.write(str(iteration)+','+str(inception_score_array[nnod][0])+'\n')
print('NODE 3: inception score {}'.format(inception_score_3[0]) )
with open('inception_score_dist_3.dat','ab') as file:
file.write(str(iteration)+','+str(inception_score_3[0])+'\n')
print('NODE 6: inception score {}'.format(inception_score_6[0]) )
with open('inception_score_dist_6.dat','ab') as file:
file.write(str(iteration)+','+str(inception_score_6[0])+'\n')
if iteration % 5000 == 4999:
save_path = saver.save(session, "/tmp/model.ckpt")
print("Model saved in file: %s" % save_path)
generate_image_3(iteration)
generate_image_6(iteration)
# Calculate dev loss and generate samples every 100 iters
#if iteration % 100 == 99:
# dev_disc_costs = []
# for images,_ in dev_gen[0]():
# _dev_disc_cost = session.run(disc_cost[0], feed_dict={real_data_int[0]: images})
# dev_disc_costs.append(_dev_disc_cost)
#lib.plot.plot('NODE {}: dev disc cost'.format(node), np.mean(dev_disc_costs))
# print('iter {} NODE 0: dev disc cost'.format(iteration), np.mean(dev_disc_costs))
#generate_image(iteration, _data)
if (iteration % 100 == 99 or iteration < 10):
print('Total time: {}'.format(time.time() - start_time) )
# Save logs every 100 iters
#if (iteration < 5) or (iteration % 100 == 99):
# lib.plot.flush()
#lib.plot.tick()
|
|
# killableprocess - subprocesses which can be reliably killed
#
# Parts of this module are copied from the subprocess.py file contained
# in the Python distribution.
#
# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
#
# Additions and modifications written by Benjamin Smedberg
# <benjamin@smedbergs.us> are Copyright (c) 2006 by the Mozilla Foundation
# <http://www.mozilla.org/>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of the
# author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
r"""killableprocess - Subprocesses which can be reliably killed
This module is a subclass of the builtin "subprocess" module. It allows
processes that launch subprocesses to be reliably killed on Windows (via the
Popen.kill() method).
It also adds a timeout argument to wait(), so a process can be given a limited
period of time to exit before it is forcefully killed.
Note: On Windows, this module requires Windows 2000 or higher (no support for
Windows 95, 98, or NT 4.0). It also requires ctypes, which is bundled with
Python 2.5+ or available from http://python.net/crew/theller/ctypes/
"""
import subprocess
import sys
import os
import time
import types
try:
from subprocess import CalledProcessError
except ImportError:
# Python 2.4 doesn't implement CalledProcessError
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
mswindows = (sys.platform == "win32")
if mswindows:
import winprocess
else:
import signal
def call(*args, **kwargs):
waitargs = {}
if "timeout" in kwargs:
waitargs["timeout"] = kwargs.pop("timeout")
return Popen(*args, **kwargs).wait(**waitargs)
def check_call(*args, **kwargs):
"""Call a program with an optional timeout. If the program has a non-zero
exit status, raises a CalledProcessError."""
retcode = call(*args, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = args[0]
raise CalledProcessError(retcode, cmd)
if not mswindows:
def DoNothing(*args):
pass
class Popen(subprocess.Popen):
if not mswindows:
# Override __init__ to set a preexec_fn
def __init__(self, *args, **kwargs):
if len(args) >= 7:
raise Exception("Arguments preexec_fn and after must be passed by keyword.")
real_preexec_fn = kwargs.pop("preexec_fn", None)
def setpgid_preexec_fn():
os.setpgid(0, 0)
if real_preexec_fn:
apply(real_preexec_fn)
kwargs['preexec_fn'] = setpgid_preexec_fn
subprocess.Popen.__init__(self, *args, **kwargs)
if mswindows:
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines, startupinfo,
creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
if not isinstance(args, types.StringTypes):
args = subprocess.list2cmdline(args)
if startupinfo is None:
startupinfo = winprocess.STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = int(p2cread)
startupinfo.hStdOutput = int(c2pwrite)
startupinfo.hStdError = int(errwrite)
if shell:
startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = winprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
# We create a new job for this process, so that we can kill
# the process and any sub-processes
self._job = winprocess.CreateJobObject()
creationflags |= winprocess.CREATE_SUSPENDED
creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
hp, ht, pid, tid = winprocess.CreateProcess(
executable, args,
None, None, # No special security
1, # Must inherit handles!
creationflags,
winprocess.EnvironmentBlock(env),
cwd, startupinfo)
self._child_created = True
self._handle = hp
self._thread = ht
self.pid = pid
winprocess.AssignProcessToJobObject(self._job, hp)
winprocess.ResumeThread(ht)
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def kill(self, group=True):
"""Kill the process. If group=True, all sub-processes will also be killed."""
if mswindows:
if group:
winprocess.TerminateJobObject(self._job, 127)
else:
winprocess.TerminateProcess(self._handle, 127)
self.returncode = 127
else:
if group:
os.killpg(self.pid, signal.SIGKILL)
else:
os.kill(self.pid, signal.SIGKILL)
self.returncode = -9
def wait(self, timeout=-1, group=True):
"""Wait for the process to terminate. Returns returncode attribute.
If timeout seconds are reached and the process has not terminated,
it will be forcefully killed. If timeout is -1, wait will not
time out."""
if self.returncode is not None:
return self.returncode
if mswindows:
if timeout != -1:
timeout = timeout * 1000
rc = winprocess.WaitForSingleObject(self._handle, timeout)
if rc == winprocess.WAIT_TIMEOUT:
self.kill(group)
else:
self.returncode = winprocess.GetExitCodeProcess(self._handle)
else:
if timeout == -1:
subprocess.Popen.wait(self)
return self.returncode
starttime = time.time()
# Make sure there is a signal handler for SIGCHLD installed
oldsignal = signal.signal(signal.SIGCHLD, DoNothing)
while time.time() < starttime + timeout - 0.01:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid != 0:
self._handle_exitstatus(sts)
signal.signal(signal.SIGCHLD, oldsignal)
return self.returncode
# time.sleep is interrupted by signals (good!)
newtimeout = timeout - time.time() + starttime
time.sleep(newtimeout)
self.kill(group)
signal.signal(signal.SIGCHLD, oldsignal)
subprocess.Popen.wait(self)
return self.returncode
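# Illustrative sketch (not part of the original module): typical usage. The
# command below is an assumption; any long-running program behaves the same.
if __name__ == "__main__":
    # Run a child that would sleep for a minute, but kill it (and, with
    # group=True inside wait(), its whole process group) after 5 seconds.
    rc = call(["python", "-c", "import time; time.sleep(60)"], timeout=5)
    print "exit status:", rc  # -9 on POSIX, 127 on Windows after a kill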
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo import messaging
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token']
_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
FILE_OPTIONS = {
None: [
cfg.StrOpt('admin_token', secret=True, default='ADMIN',
help='A "shared secret" that can be used to bootstrap '
'Keystone. This "token" does not represent a user, '
'and carries no explicit authorization. To disable '
'in production (highly recommended), remove '
'AdminTokenAuthMiddleware from your paste '
'application pipelines (for example, in '
'keystone-paste.ini).'),
cfg.StrOpt('public_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT')],
help='The IP address of the network interface for the '
'public service to listen on.'),
cfg.StrOpt('admin_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT')],
help='The IP address of the network interface for the '
'admin service to listen on.'),
cfg.IntOpt('compute_port', default=8774,
help='(Deprecated) The port which the OpenStack Compute '
'service listens on. This option was only used for '
'string replacement in the templated catalog backend. '
'Templated catalogs should replace the '
'"$(compute_port)s" substitution with the static port '
'of the compute service. As of Juno, this option is '
'deprecated and will be removed in the L release.'),
cfg.IntOpt('admin_port', default=35357,
help='The port number which the admin service listens '
'on.'),
cfg.IntOpt('public_port', default=5000,
help='The port number which the public service listens '
'on.'),
cfg.StrOpt('public_endpoint',
help='The base public endpoint URL for Keystone that is '
'advertised to clients (NOTE: this does NOT affect '
'how Keystone listens for connections). '
'Defaults to the base host URL of the request. E.g. a '
'request to http://server:5000/v2.0/users will '
'default to http://server:5000. You should only need '
'to set this value if the base URL contains a path '
'(e.g. /prefix/v2.0) or the endpoint should be found '
'on a different server.'),
cfg.StrOpt('admin_endpoint',
help='The base admin endpoint URL for Keystone that is '
'advertised to clients (NOTE: this does NOT affect '
'how Keystone listens for connections). '
'Defaults to the base host URL of the request. E.g. a '
'request to http://server:35357/v2.0/users will '
'default to http://server:35357. You should only need '
'to set this value if the base URL contains a path '
'(e.g. /prefix/v2.0) or the endpoint should be found '
'on a different server.'),
cfg.IntOpt('max_project_tree_depth', default=5,
               help='Maximum depth of the project hierarchy; the depth is '
                    'restricted to avoid performance problems.'),
cfg.IntOpt('public_workers', default=1,
help='The number of worker processes to serve the public '
'WSGI application'),
cfg.IntOpt('admin_workers', default=1,
help='The number of worker processes to serve the admin '
'WSGI application'),
# default max request size is 112k
cfg.IntOpt('max_request_body_size', default=114688,
help='Enforced by optional sizelimit middleware '
'(keystone.middleware:RequestBodySizeLimiter).'),
cfg.IntOpt('max_param_size', default=64,
help='Limit the sizes of user & project ID/names.'),
# we allow tokens to be a bit larger to accommodate PKI
cfg.IntOpt('max_token_size', default=8192,
help='Similar to max_param_size, but provides an '
'exception for token values.'),
cfg.StrOpt('member_role_id',
default='9fe2ff9ee4384b1894a90878d3e92bab',
help='During a SQL upgrade member_role_id will be used '
'to create a new role that will replace records in '
'the assignment table with explicit role grants. '
'After migration, the member_role_id will be used in '
'the API add_user_to_project.'),
cfg.StrOpt('member_role_name', default='_member_',
help='During a SQL upgrade member_role_name will be used '
'to create a new role that will replace records in '
'the assignment table with explicit role grants. '
'After migration, member_role_name will be ignored.'),
cfg.IntOpt('crypt_strength', default=40000,
help='The value passed as the keyword "rounds" to '
'passlib\'s encrypt method.'),
cfg.BoolOpt('tcp_keepalive', default=False,
help='Set this to true if you want to enable '
'TCP_KEEPALIVE on server sockets, i.e. sockets used '
'by the Keystone wsgi server for client '
'connections.'),
cfg.IntOpt('tcp_keepidle',
default=600,
help='Sets the value of TCP_KEEPIDLE in seconds for each '
'server socket. Only applies if tcp_keepalive is '
'true. Not supported on OS X.'),
cfg.IntOpt('list_limit',
help='The maximum number of entities that will be '
'returned in a collection, with no limit set by '
'default. This global limit may be then overridden '
'for a specific driver, by specifying a list_limit '
'in the appropriate section (e.g. [assignment]).'),
cfg.BoolOpt('domain_id_immutable', default=True,
help='Set this to false if you want to enable the '
'ability for user, group and project entities '
'to be moved between domains by updating their '
'domain_id. Allowing such movement is not '
'recommended if the scope of a domain admin is being '
'restricted by use of an appropriate policy file '
'(see policy.v3cloudsample as an example).'),
cfg.BoolOpt('strict_password_check', default=False,
help='If set to true, strict password length checking is '
'performed for password manipulation. If a password '
'exceeds the maximum length, the operation will fail '
'with an HTTP 403 Forbidden error. If set to false, '
'passwords are automatically truncated to the '
'maximum length.')],
'identity': [
cfg.StrOpt('default_domain_id', default='default',
help='This references the domain to use for all '
'Identity API v2 requests (which are not aware of '
'domains). A domain with this ID will be created '
'for you by keystone-manage db_sync in migration '
'008. The domain referenced by this ID cannot be '
'deleted on the v3 API, to prevent accidentally '
'breaking the v2 API. There is nothing special about '
'this domain, other than the fact that it must '
                       'exist in order to maintain support for your v2 '
'clients.'),
cfg.BoolOpt('domain_specific_drivers_enabled',
default=False,
help='A subset (or all) of domains can have their own '
'identity driver, each with their own partial '
'configuration file in a domain configuration '
'directory. Only values specific to the domain '
'need to be placed in the domain specific '
'configuration file. This feature is disabled by '
'default; set to true to enable.'),
cfg.StrOpt('domain_config_dir',
default='/etc/keystone/domains',
help='Path for Keystone to locate the domain specific '
'identity configuration files if '
'domain_specific_drivers_enabled is set to true.'),
cfg.StrOpt('driver',
default=('keystone.identity.backends'
'.sql.Identity'),
help='Identity backend driver.'),
cfg.IntOpt('max_password_length', default=4096,
help='Maximum supported length for user passwords; '
'decrease to improve performance.'),
cfg.IntOpt('list_limit',
help='Maximum number of entities that will be returned in '
'an identity collection.'),
],
'identity_mapping': [
cfg.StrOpt('driver',
default=('keystone.identity.mapping_backends'
'.sql.Mapping'),
help='Keystone Identity Mapping backend driver.'),
cfg.StrOpt('generator',
default=('keystone.identity.id_generators'
'.sha256.Generator'),
help='Public ID generator for user and group entities. '
'The Keystone identity mapper only supports '
'generators that produce no more than 64 characters.'),
cfg.BoolOpt('backward_compatible_ids',
default=True,
help='The format of user and group IDs changed '
'in Juno for backends that do not generate UUIDs '
'(e.g. LDAP), with keystone providing a hash mapping '
'to the underlying attribute in LDAP. By default '
'this mapping is disabled, which ensures that '
'existing IDs will not change. Even when the '
'mapping is enabled by using domain specific '
'drivers, any users and groups from the default '
'domain being handled by LDAP will still not be '
'mapped to ensure their IDs remain backward '
'compatible. Setting this value to False will '
'enable the mapping for even the default LDAP '
'driver. It is only safe to do this if you do not '
'already have assignments for users and '
'groups from the default LDAP domain, and it is '
'acceptable for Keystone to provide the different '
'IDs to clients than it did previously. Typically '
'this means that the only time you can set this '
'value to False is when configuring a fresh '
'installation.'),
],
'trust': [
cfg.BoolOpt('enabled', default=True,
help='Delegation and impersonation features can be '
'optionally disabled.'),
cfg.StrOpt('driver',
default='keystone.trust.backends.sql.Trust',
help='Trust backend driver.')],
'os_inherit': [
cfg.BoolOpt('enabled', default=False,
help='Role-assignment inheritance to projects from '
'owning domain can be optionally enabled.'),
],
'token': [
cfg.ListOpt('bind', default=[],
help='External auth mechanisms that should add bind '
'information to token, e.g., kerberos,x509.'),
cfg.StrOpt('enforce_token_bind', default='permissive',
help='Enforcement policy on tokens presented to Keystone '
'with bind information. One of disabled, permissive, '
'strict, required or a specifically required bind '
'mode, e.g., kerberos or x509 to require binding to '
'that authentication.'),
cfg.IntOpt('expiration', default=3600,
help='Amount of time a token should remain valid '
'(in seconds).'),
cfg.StrOpt('provider',
help='Controls the token construction, validation, and '
'revocation operations. Core providers are '
'"keystone.token.providers.[pkiz|pki|uuid].'
'Provider". The default provider is uuid.'),
cfg.StrOpt('driver',
default='keystone.token.persistence.backends.sql.Token',
help='Token persistence backend driver.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for token system caching. This has no '
'effect unless global caching is enabled.'),
cfg.IntOpt('revocation_cache_time', default=3600,
help='Time to cache the revocation list and the revocation '
'events if revoke extension is enabled (in seconds). '
'This has no effect unless global and token '
'caching are enabled.'),
cfg.IntOpt('cache_time',
help='Time to cache tokens (in seconds). This has no '
'effect unless global and token caching are '
'enabled.'),
cfg.BoolOpt('revoke_by_id', default=True,
help='Revoke token by token identifier. Setting '
'revoke_by_id to true enables various forms of '
'enumerating tokens, e.g. `list tokens for user`. '
'These enumerations are processed to determine the '
'list of tokens to revoke. Only disable if you are '
'switching to using the Revoke extension with a '
'backend other than KVS, which stores events in memory.'),
cfg.StrOpt('hash_algorithm', default='md5',
help="The hash algorithm to use for PKI tokens. This can "
"be set to any algorithm that hashlib supports. "
"WARNING: Before changing this value, the auth_token "
"middleware must be configured with the "
"hash_algorithms, otherwise token revocation will "
"not be processed correctly."),
],
'revoke': [
cfg.StrOpt('driver',
default='keystone.contrib.revoke.backends.kvs.Revoke',
help='An implementation of the backend for persisting '
'revocation events.'),
cfg.IntOpt('expiration_buffer', default=1800,
help='This value (calculated in seconds) is added to token '
'expiration before a revocation event may be removed '
'from the backend.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for revocation event caching. This has no '
'effect unless global caching is enabled.'),
],
'cache': [
cfg.StrOpt('config_prefix', default='cache.keystone',
help='Prefix for building the configuration dictionary '
'for the cache region. This should not need to be '
'changed unless there is another dogpile.cache '
'region with the same configuration name.'),
cfg.IntOpt('expiration_time', default=600,
help='Default TTL, in seconds, for any cached item in '
'the dogpile.cache region. This applies to any '
'cached method that doesn\'t have an explicit '
'cache expiration time defined for it.'),
# NOTE(morganfainberg): the dogpile.cache.memory backend is acceptable in devstack
# and other such single-process/thread deployments. Running
# dogpile.cache.memory in any other configuration has the same pitfalls
# as the KVS token backend. It is recommended that either Redis or
# Memcached are used as the dogpile backend for real workloads. To
# prevent issues with the memory cache ending up in "production"
# unintentionally, we register a no-op as the keystone default caching
# backend.
cfg.StrOpt('backend', default='keystone.common.cache.noop',
help='Dogpile.cache backend module. It is recommended '
'that Memcache (dogpile.cache.memcached) or Redis '
'(dogpile.cache.redis) be used in production '
'deployments. Small workloads (single process) '
'like devstack can use the dogpile.cache.memory '
'backend.'),
cfg.MultiStrOpt('backend_argument', default=[],
help='Arguments supplied to the backend module. '
'Specify this option once per argument to be '
'passed to the dogpile.cache backend. Example '
'format: "<argname>:<value>".'),
cfg.ListOpt('proxies', default=[],
help='Proxy classes to import that will affect the way '
'the dogpile.cache backend functions. See the '
'dogpile.cache documentation on '
'changing-backend-behavior.'),
cfg.BoolOpt('enabled', default=False,
help='Global toggle for all caching using the '
'should_cache_fn mechanism.'),
cfg.BoolOpt('debug_cache_backend', default=False,
help='Extra debugging from the cache backend (cache '
'keys, get/set/delete/etc calls). This is only '
'really useful if you need to see the specific '
'cache-backend get/set/delete calls with the '
'keys/values. Typically this should be left set '
'to false.'),
],
'ssl': [
cfg.BoolOpt('enable', default=False,
help='Toggle for SSL support on the Keystone '
'eventlet servers.'),
cfg.StrOpt('certfile',
default="/etc/keystone/ssl/certs/keystone.pem",
help='Path of the certfile for SSL. For non-production '
'environments, you may be interested in using '
'`keystone-manage ssl_setup` to generate self-signed '
'certificates.'),
cfg.StrOpt('keyfile',
default='/etc/keystone/ssl/private/keystonekey.pem',
help='Path of the keyfile for SSL.'),
cfg.StrOpt('ca_certs',
default='/etc/keystone/ssl/certs/ca.pem',
help='Path of the ca cert file for SSL.'),
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
help='Path of the CA key file for SSL.'),
cfg.BoolOpt('cert_required', default=False,
help='Require client certificate.'),
cfg.IntOpt('key_size', default=1024,
help='SSL key length (in bits) (auto generated '
'certificate).'),
cfg.IntOpt('valid_days', default=3650,
help='Days the certificate is valid for once signed '
'(auto generated certificate).'),
cfg.StrOpt('cert_subject',
default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost',
help='SSL certificate subject (auto generated '
'certificate).'),
],
'signing': [
cfg.StrOpt('token_format',
help='Deprecated in favor of provider in the '
'[token] section.'),
cfg.StrOpt('certfile',
default=_CERTFILE,
help='Path of the certfile for token signing. For '
'non-production environments, you may be interested '
'in using `keystone-manage pki_setup` to generate '
'self-signed certificates.'),
cfg.StrOpt('keyfile',
default=_KEYFILE,
help='Path of the keyfile for token signing.'),
cfg.StrOpt('ca_certs',
default='/etc/keystone/ssl/certs/ca.pem',
help='Path of the CA for token signing.'),
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
help='Path of the CA key for token signing.'),
cfg.IntOpt('key_size', default=2048,
help='Key size (in bits) for token signing cert '
'(auto generated certificate).'),
cfg.IntOpt('valid_days', default=3650,
help='Days the token signing cert is valid for '
'(auto generated certificate).'),
cfg.StrOpt('cert_subject',
default=('/C=US/ST=Unset/L=Unset/O=Unset/'
'CN=www.example.com'),
help='Certificate subject (auto generated certificate) for '
'token signing.'),
],
'assignment': [
# assignment has no default for backward compatibility reasons.
# If assignment driver is not specified, the identity driver chooses
# the backend
cfg.StrOpt('driver',
help='Assignment backend driver.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for assignment caching. This has no effect '
'unless global caching is enabled.'),
cfg.IntOpt('cache_time',
help='TTL (in seconds) to cache assignment data. This has '
'no effect unless global caching is enabled.'),
cfg.IntOpt('list_limit',
help='Maximum number of entities that will be returned '
'in an assignment collection.'),
],
'credential': [
cfg.StrOpt('driver',
default=('keystone.credential.backends'
'.sql.Credential'),
help='Credential backend driver.'),
],
'oauth1': [
cfg.StrOpt('driver',
default='keystone.contrib.oauth1.backends.sql.OAuth1',
help='OAuth backend driver.'),
cfg.IntOpt('request_token_duration', default=28800,
help='Duration (in seconds) for the OAuth Request Token.'),
cfg.IntOpt('access_token_duration', default=86400,
help='Duration (in seconds) for the OAuth Access Token.'),
],
'federation': [
cfg.StrOpt('driver',
default='keystone.contrib.federation.'
'backends.sql.Federation',
help='Federation backend driver.'),
cfg.StrOpt('assertion_prefix', default='',
help='Value to be used when filtering assertion parameters '
'from the environment.'),
],
'policy': [
cfg.StrOpt('driver',
default='keystone.policy.backends.sql.Policy',
help='Policy backend driver.'),
cfg.IntOpt('list_limit',
help='Maximum number of entities that will be returned '
'in a policy collection.'),
],
'ec2': [
cfg.StrOpt('driver',
default='keystone.contrib.ec2.backends.kvs.Ec2',
help='EC2Credential backend driver.'),
],
'endpoint_filter': [
cfg.StrOpt('driver',
default='keystone.contrib.endpoint_filter.backends'
'.sql.EndpointFilter',
help='Endpoint Filter backend driver.'),
cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
help='Toggle to return all active endpoints if no filter '
'exists.'),
],
'endpoint_policy': [
cfg.StrOpt('driver',
default='keystone.contrib.endpoint_policy.backends'
'.sql.EndpointPolicy',
help='Endpoint policy backend driver.'),
],
'stats': [
cfg.StrOpt('driver',
default=('keystone.contrib.stats.backends'
'.kvs.Stats'),
help='Stats backend driver.'),
],
'ldap': [
cfg.StrOpt('url', default='ldap://localhost',
help='URL for connecting to the LDAP server.'),
cfg.StrOpt('user',
help='User BindDN to query the LDAP server.'),
cfg.StrOpt('password', secret=True,
help='Password for the BindDN to query the LDAP server.'),
cfg.StrOpt('suffix', default='cn=example,cn=com',
help='LDAP server suffix.'),
cfg.BoolOpt('use_dumb_member', default=False,
help='If true, will add a dummy member to groups. This is '
'required if the objectclass for groups requires the '
'"member" attribute.'),
cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent',
help='DN of the "dummy member" to use when '
'"use_dumb_member" is enabled.'),
cfg.BoolOpt('allow_subtree_delete', default=False,
help='Delete subtrees using the subtree delete control. '
'Only enable this option if your LDAP server '
'supports subtree deletion.'),
cfg.StrOpt('query_scope', default='one',
help='The LDAP scope for queries, this can be either '
'"one" (onelevel/singleLevel) or "sub" '
'(subtree/wholeSubtree).'),
cfg.IntOpt('page_size', default=0,
help='Maximum results per page; a value of zero ("0") '
'disables paging.'),
cfg.StrOpt('alias_dereferencing', default='default',
help='The LDAP dereferencing option for queries. This '
'can be either "never", "searching", "always", '
'"finding" or "default". The "default" option falls '
'back to using default dereferencing configured by '
'your ldap.conf.'),
cfg.IntOpt('debug_level',
help='Sets the LDAP debugging level for LDAP calls. '
'A value of 0 means that debugging is not enabled. '
'This value is a bitmask, consult your LDAP '
'documentation for possible values.'),
cfg.BoolOpt('chase_referrals',
help='Override the system\'s default referral chasing '
'behavior for queries.'),
cfg.StrOpt('user_tree_dn',
help='Search base for users.'),
cfg.StrOpt('user_filter',
help='LDAP search filter for users.'),
cfg.StrOpt('user_objectclass', default='inetOrgPerson',
help='LDAP objectclass for users.'),
cfg.StrOpt('user_id_attribute', default='cn',
help='LDAP attribute mapped to user id. '
'WARNING: must not be a multivalued attribute.'),
cfg.StrOpt('user_name_attribute', default='sn',
help='LDAP attribute mapped to user name.'),
cfg.StrOpt('user_mail_attribute', default='mail',
help='LDAP attribute mapped to user email.'),
cfg.StrOpt('user_pass_attribute', default='userPassword',
help='LDAP attribute mapped to password.'),
cfg.StrOpt('user_enabled_attribute', default='enabled',
help='LDAP attribute mapped to user enabled flag.'),
cfg.BoolOpt('user_enabled_invert', default=False,
help='Invert the meaning of the boolean enabled values. '
'Some LDAP servers use a boolean lock attribute '
'where "true" means an account is disabled. Setting '
'"user_enabled_invert = true" will allow these lock '
'attributes to be used. This setting will have no '
'effect if "user_enabled_mask" or '
'"user_enabled_emulation" settings are in use.'),
cfg.IntOpt('user_enabled_mask', default=0,
help='Bitmask integer to indicate the bit that the enabled '
'value is stored in if the LDAP server represents '
'"enabled" as a bit on an integer rather than a '
'boolean. A value of "0" indicates the mask is not '
'used. If this is not set to "0" the typical value '
'is "2". This is typically used when '
'"user_enabled_attribute = userAccountControl".'),
cfg.StrOpt('user_enabled_default', default='True',
help='Default value to enable users. This should match an '
'appropriate int value if the LDAP server uses '
'non-boolean (bitmask) values to indicate if a user '
'is enabled or disabled. If this is not set to "True" '
'the typical value is "512". This is typically used '
'when "user_enabled_attribute = userAccountControl".'),
cfg.ListOpt('user_attribute_ignore',
default=['default_project_id', 'tenants'],
help='List of attributes stripped off the user on '
'update.'),
cfg.StrOpt('user_default_project_id_attribute',
help='LDAP attribute mapped to default_project_id for '
'users.'),
cfg.BoolOpt('user_allow_create', default=True,
help='Allow user creation in LDAP backend.'),
cfg.BoolOpt('user_allow_update', default=True,
help='Allow user updates in LDAP backend.'),
cfg.BoolOpt('user_allow_delete', default=True,
help='Allow user deletion in LDAP backend.'),
cfg.BoolOpt('user_enabled_emulation', default=False,
help='If true, Keystone uses an alternative method to '
'determine if a user is enabled or not by checking '
'if they are a member of the '
'"user_enabled_emulation_dn" group.'),
cfg.StrOpt('user_enabled_emulation_dn',
help='DN of the group entry to hold enabled users when '
'using enabled emulation.'),
cfg.ListOpt('user_additional_attribute_mapping',
default=[],
help='List of additional LDAP attributes used for mapping '
'additional attribute mappings for users. Attribute '
'mapping format is <ldap_attr>:<user_attr>, where '
'ldap_attr is the attribute in the LDAP entry and '
'user_attr is the Identity API attribute.'),
cfg.StrOpt('project_tree_dn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_tree_dn', group='ldap')],
help='Search base for projects.'),
cfg.StrOpt('project_filter',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_filter', group='ldap')],
help='LDAP search filter for projects.'),
cfg.StrOpt('project_objectclass', default='groupOfNames',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_objectclass', group='ldap')],
help='LDAP objectclass for projects.'),
cfg.StrOpt('project_id_attribute', default='cn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_id_attribute', group='ldap')],
help='LDAP attribute mapped to project id.'),
cfg.StrOpt('project_member_attribute', default='member',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_member_attribute', group='ldap')],
help='LDAP attribute mapped to project membership for '
'user.'),
cfg.StrOpt('project_name_attribute', default='ou',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_name_attribute', group='ldap')],
help='LDAP attribute mapped to project name.'),
cfg.StrOpt('project_desc_attribute', default='description',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_desc_attribute', group='ldap')],
help='LDAP attribute mapped to project description.'),
cfg.StrOpt('project_enabled_attribute', default='enabled',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_attribute', group='ldap')],
help='LDAP attribute mapped to project enabled.'),
cfg.StrOpt('project_domain_id_attribute',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_domain_id_attribute', group='ldap')],
default='businessCategory',
help='LDAP attribute mapped to project domain_id.'),
cfg.ListOpt('project_attribute_ignore', default=[],
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_attribute_ignore', group='ldap')],
help='List of attributes stripped off the project on '
'update.'),
cfg.BoolOpt('project_allow_create', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_create', group='ldap')],
help='Allow project creation in LDAP backend.'),
cfg.BoolOpt('project_allow_update', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_update', group='ldap')],
help='Allow project update in LDAP backend.'),
cfg.BoolOpt('project_allow_delete', default=True,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_allow_delete', group='ldap')],
help='Allow project deletion in LDAP backend.'),
cfg.BoolOpt('project_enabled_emulation', default=False,
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_emulation', group='ldap')],
help='If true, Keystone uses an alternative method to '
'determine if a project is enabled or not by '
'checking if they are a member of the '
'"project_enabled_emulation_dn" group.'),
cfg.StrOpt('project_enabled_emulation_dn',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_enabled_emulation_dn', group='ldap')],
help='DN of the group entry to hold enabled projects when '
'using enabled emulation.'),
cfg.ListOpt('project_additional_attribute_mapping',
deprecated_opts=[cfg.DeprecatedOpt(
'tenant_additional_attribute_mapping', group='ldap')],
default=[],
help='Additional attribute mappings for projects. '
'Attribute mapping format is '
'<ldap_attr>:<user_attr>, where ldap_attr is the '
'attribute in the LDAP entry and user_attr is the '
'Identity API attribute.'),
cfg.StrOpt('role_tree_dn',
help='Search base for roles.'),
cfg.StrOpt('role_filter',
help='LDAP search filter for roles.'),
cfg.StrOpt('role_objectclass', default='organizationalRole',
help='LDAP objectclass for roles.'),
cfg.StrOpt('role_id_attribute', default='cn',
help='LDAP attribute mapped to role id.'),
cfg.StrOpt('role_name_attribute', default='ou',
help='LDAP attribute mapped to role name.'),
cfg.StrOpt('role_member_attribute', default='roleOccupant',
help='LDAP attribute mapped to role membership.'),
cfg.ListOpt('role_attribute_ignore', default=[],
help='List of attributes stripped off the role on '
'update.'),
cfg.BoolOpt('role_allow_create', default=True,
help='Allow role creation in LDAP backend.'),
cfg.BoolOpt('role_allow_update', default=True,
help='Allow role update in LDAP backend.'),
cfg.BoolOpt('role_allow_delete', default=True,
help='Allow role deletion in LDAP backend.'),
cfg.ListOpt('role_additional_attribute_mapping',
default=[],
help='Additional attribute mappings for roles. Attribute '
'mapping format is <ldap_attr>:<user_attr>, where '
'ldap_attr is the attribute in the LDAP entry and '
'user_attr is the Identity API attribute.'),
cfg.StrOpt('group_tree_dn',
help='Search base for groups.'),
cfg.StrOpt('group_filter',
help='LDAP search filter for groups.'),
cfg.StrOpt('group_objectclass', default='groupOfNames',
help='LDAP objectclass for groups.'),
cfg.StrOpt('group_id_attribute', default='cn',
help='LDAP attribute mapped to group id.'),
cfg.StrOpt('group_name_attribute', default='ou',
help='LDAP attribute mapped to group name.'),
cfg.StrOpt('group_member_attribute', default='member',
help='LDAP attribute mapped to show group membership.'),
cfg.StrOpt('group_desc_attribute', default='description',
help='LDAP attribute mapped to group description.'),
cfg.ListOpt('group_attribute_ignore', default=[],
help='List of attributes stripped off the group on '
'update.'),
cfg.BoolOpt('group_allow_create', default=True,
help='Allow group creation in LDAP backend.'),
cfg.BoolOpt('group_allow_update', default=True,
help='Allow group update in LDAP backend.'),
cfg.BoolOpt('group_allow_delete', default=True,
help='Allow group deletion in LDAP backend.'),
cfg.ListOpt('group_additional_attribute_mapping',
default=[],
help='Additional attribute mappings for groups. Attribute '
'mapping format is <ldap_attr>:<user_attr>, where '
'ldap_attr is the attribute in the LDAP entry and '
'user_attr is the Identity API attribute.'),
cfg.StrOpt('tls_cacertfile',
help='CA certificate file path for communicating with '
'LDAP servers.'),
cfg.StrOpt('tls_cacertdir',
help='CA certificate directory path for communicating with '
'LDAP servers.'),
cfg.BoolOpt('use_tls', default=False,
help='Enable TLS for communicating with LDAP servers.'),
cfg.StrOpt('tls_req_cert', default='demand',
help='Valid options for tls_req_cert are demand, never, '
'and allow.'),
cfg.BoolOpt('use_pool', default=False,
help='Enable LDAP connection pooling.'),
cfg.IntOpt('pool_size', default=10,
help='Connection pool size.'),
cfg.IntOpt('pool_retry_max', default=3,
help='Maximum count of reconnect trials.'),
cfg.FloatOpt('pool_retry_delay', default=0.1,
help='Time span in seconds to wait between two '
'reconnect trials.'),
cfg.IntOpt('pool_connection_timeout', default=-1,
help='Connector timeout in seconds. Value -1 indicates '
'indefinite wait for response.'),
cfg.IntOpt('pool_connection_lifetime', default=600,
help='Connection lifetime in seconds.'),
cfg.BoolOpt('use_auth_pool', default=False,
help='Enable LDAP connection pooling for end user '
'authentication. If use_pool is disabled, then this '
'setting is meaningless and is not used at all.'),
cfg.IntOpt('auth_pool_size', default=100,
help='End user auth connection pool size.'),
cfg.IntOpt('auth_pool_connection_lifetime', default=60,
help='End user auth connection lifetime in seconds.'),
],
'auth': [
cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
help='Default auth methods.'),
cfg.StrOpt('password',
default='keystone.auth.plugins.password.Password',
help='The password auth plugin module.'),
cfg.StrOpt('token',
default='keystone.auth.plugins.token.Token',
help='The token auth plugin module.'),
# deals with REMOTE_USER authentication
cfg.StrOpt('external',
default='keystone.auth.plugins.external.DefaultDomain',
help='The external (REMOTE_USER) auth plugin module.'),
],
'paste_deploy': [
cfg.StrOpt('config_file', default='keystone-paste.ini',
help='Name of the paste configuration file that defines '
'the available pipelines.'),
],
'memcache': [
cfg.ListOpt('servers', default=['localhost:11211'],
help='Memcache servers in the format of "host:port".'),
cfg.IntOpt('max_compare_and_set_retry', default=16,
help='Number of compare-and-set attempts to make when '
'using compare-and-set in the token memcache back '
'end.'),
],
'catalog': [
cfg.StrOpt('template_file',
default='default_catalog.templates',
help='Catalog template file name for use with the '
'template catalog backend.'),
cfg.StrOpt('driver',
default='keystone.catalog.backends.sql.Catalog',
help='Catalog backend driver.'),
cfg.BoolOpt('caching', default=True,
help='Toggle for catalog caching. This has no '
'effect unless global caching is enabled.'),
cfg.IntOpt('cache_time',
help='Time to cache catalog data (in seconds). This has no '
'effect unless global and catalog caching are '
'enabled.'),
cfg.IntOpt('list_limit',
help='Maximum number of entities that will be returned '
'in a catalog collection.'),
cfg.ListOpt('endpoint_substitution_whitelist',
default=['tenant_id', 'user_id', 'public_bind_host',
'admin_bind_host', 'compute_host', 'compute_port',
'admin_port', 'public_port', 'public_endpoint',
'admin_endpoint'],
help='(Deprecated) List of possible substitutions for use '
'in formatting endpoints. Use caution when modifying '
'this list. It will give users with permission to '
'create endpoints the ability to see those values '
'in your configuration file. This option will be '
'removed in Juno.'),
],
'kvs': [
cfg.ListOpt('backends', default=[],
help='Extra dogpile.cache backend modules to register '
'with the dogpile.cache library.'),
cfg.StrOpt('config_prefix', default='keystone.kvs',
help='Prefix for building the configuration dictionary '
'for the KVS region. This should not need to be '
'changed unless there is another dogpile.cache '
'region with the same configuration name.'),
cfg.BoolOpt('enable_key_mangler', default=True,
help='Toggle to disable using a key-mangling function '
'to ensure fixed length keys. This is toggle-able '
'for debugging purposes, it is highly recommended '
'to always leave this set to true.'),
cfg.IntOpt('default_lock_timeout', default=5,
help='Default lock timeout for distributed locking.'),
],
'saml': [
cfg.IntOpt('assertion_expiration_time', default=3600,
help='Default TTL, in seconds, for any generated SAML '
'assertion created by Keystone.'),
cfg.StrOpt('xmlsec1_binary',
default='xmlsec1',
help='Binary to be called for XML signing. Install the '
'appropriate package, specify absolute path or adjust '
'your PATH environment variable if the binary cannot '
'be found.'),
cfg.StrOpt('certfile',
default=_CERTFILE,
help='Path of the certfile for SAML signing. For '
'non-production environments, you may be interested '
'in using `keystone-manage pki_setup` to generate '
'self-signed certificates. Note, the path cannot '
'contain a comma.'),
cfg.StrOpt('keyfile',
default=_KEYFILE,
help='Path of the keyfile for SAML signing. Note, the path '
'cannot contain a comma.'),
cfg.StrOpt('idp_entity_id',
help='Entity ID value for unique Identity Provider '
'identification. Usually FQDN is set with a suffix. '
'A value is required to generate IDP Metadata. '
'For example: https://keystone.example.com/v3/'
'OS-FEDERATION/saml2/idp'),
cfg.StrOpt('idp_sso_endpoint',
help='Identity Provider Single-Sign-On service value, '
'required in the Identity Provider\'s metadata. '
'A value is required to generate IDP Metadata. '
'For example: https://keystone.example.com/v3/'
'OS-FEDERATION/saml2/sso'),
cfg.StrOpt('idp_lang', default='en',
help='Language used by the organization.'),
cfg.StrOpt('idp_organization_name',
help='Organization name the installation belongs to.'),
cfg.StrOpt('idp_organization_display_name',
help='Organization name to be displayed.'),
cfg.StrOpt('idp_organization_url',
help='URL of the organization.'),
cfg.StrOpt('idp_contact_company',
help='Company of contact person.'),
cfg.StrOpt('idp_contact_name',
help='Given name of contact person.'),
cfg.StrOpt('idp_contact_surname',
help='Surname of contact person.'),
cfg.StrOpt('idp_contact_email',
help='Email address of contact person.'),
cfg.StrOpt('idp_contact_telephone',
help='Telephone number of contact person.'),
cfg.StrOpt('idp_contact_type', default='other',
help='Contact type. Allowed values are: '
'technical, support, administrative, '
'billing, and other.'),
cfg.StrOpt('idp_metadata_path',
default='/etc/keystone/saml2_idp_metadata.xml',
help='Path to the Identity Provider Metadata file. '
'This file should be generated with the '
'keystone-manage saml_idp_metadata command.'),
],
}
CONF = cfg.CONF
messaging.set_transport_defaults(control_exchange='keystone')
def _register_auth_plugin_opt(conf, option):
conf.register_opt(option, group='auth')
def setup_authentication(conf=None):
# register any non-default auth methods here (used by extensions, etc)
if conf is None:
conf = CONF
for method_name in conf.auth.methods:
if method_name not in _DEFAULT_AUTH_METHODS:
option = cfg.StrOpt(method_name)
_register_auth_plugin_opt(conf, option)
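# Illustrative note (the extra method name 'kerberos' is hypothetical): with
#   [auth]
#   methods = external,password,token,kerberos
# setup_authentication() registers an additional StrOpt named 'kerberos' in the
# [auth] group, so a "kerberos = <auth plugin module path>" option can then be set.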
def configure(conf=None):
if conf is None:
conf = CONF
conf.register_cli_opt(
cfg.BoolOpt('standard-threads', default=False,
help='Do not monkey-patch threading system modules.'))
conf.register_cli_opt(
cfg.StrOpt('pydev-debug-host',
help='Host to connect to for remote debugger.'))
conf.register_cli_opt(
cfg.IntOpt('pydev-debug-port',
help='Port to connect to for remote debugger.'))
for section in FILE_OPTIONS:
for option in FILE_OPTIONS[section]:
if section:
conf.register_opt(option, group=section)
else:
conf.register_opt(option)
# register any non-default auth methods here (used by extensions, etc)
setup_authentication(conf)
def list_opts():
"""Return a list of oslo.config options available in Keystone.
The returned list includes all oslo.config options which are registered as
the "FILE_OPTIONS" in keystone.common.config. This list will not include
the options from the oslo-incubator library or any options registered
dynamically at run time.
Each object in the list is a two element tuple. The first element of
each tuple is the name of the group under which the list of options in the
second element will be registered. A group name of None corresponds to the
[DEFAULT] group in config files.
This function is also discoverable via the 'oslo.config.opts' entry point
under the 'keystone.config.opts' namespace.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return FILE_OPTIONS.items()
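if __name__ == '__main__':
    # Minimal sketch of consuming the (group_name, opts) tuples from list_opts(),
    # roughly what the oslo.config sample generator does via the
    # 'keystone.config.opts' entry point.
    for _group, _opts in list_opts():
        print('[%s] %d options' % (_group or 'DEFAULT', len(_opts)))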
|
|
import sys
import inspect
import logging
from urllib.parse import urlencode
from aatest import Break
from aatest import Unknown
from aatest.events import EV_SEND
from aatest.events import EV_HTTP_RESPONSE
from aatest.operation import Operation
# from saml2 import samlp
from saml2 import SAMLError
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_SOAP
from saml2 import SamlBase
from saml2.httputil import Response
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NAMEID_FORMAT_PERSISTENT
__author__ = 'roland'
logger = logging.getLogger(__name__)
class MissingMetadata(Exception):
pass
class UnknownBinding(Exception):
pass
class ServiceProviderRequestHandlerError(Exception):
pass
def map_arguments(args, map):
for fro, to in map.items():
try:
args[to] = args[fro]
except KeyError:
pass
else:
del args[fro]
return args
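def _map_arguments_example():
    # Minimal sketch (the keys used here are hypothetical): rename the
    # 'destination' argument to 'location' before passing it on.
    args = {'destination': 'https://idp.example.com/sso'}
    return map_arguments(args, {'destination': 'location'})
    # -> {'location': 'https://idp.example.com/sso'}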
class Request(Operation):
name_id_formats = [NAMEID_FORMAT_TRANSIENT, NAMEID_FORMAT_PERSISTENT]
bindings = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT]
message = None
message_cls = SamlBase
def __init__(self, conv, webio, sh, **kwargs):
Operation.__init__(self, conv, webio, sh, **kwargs)
self.expect_error = {}
self.req_args = {}
self.op_args = {}
self.msg_param = {}
self.csi = None
self.entity = self.conv.entity
self.trace = self.conv.trace
self.relay_state = ''
self.request_id = ''
self.response_args = {}
self.request_inst = None
def expected_error_response(self, response):
if isinstance(response, SAMLError):
if self.expect_error["stop"]:
raise Break("Stop requested after received expected error")
else:
self.conv.trace.error("Expected error, didn't get it")
raise Break("Did not receive expected error")
def _make_request(self):
raise NotImplementedError()
def handle_response(self, *args):
raise NotImplementedError()
def op_setup(self):
metadata = self.conv.entity.metadata
try:
entity = metadata[self.conv.entity_id]
except AttributeError:
self.req_args['discovery_service_url'] = self.conv.disco_srv
except KeyError:
raise MissingMetadata("No metadata available for {}".format(self.conv.entity_id))
else:
for idp in entity["idpsso_descriptor"]:
for nformat in self.name_id_formats:
if "nameid_format" in self.req_args and self.req_args["nameid_format"]:
break
for nif in idp["name_id_format"]:
if nif["text"] == nformat:
self.req_args["nameid_format"] = nformat
break
for bind in self.bindings:
if "response_binding" in self.req_args and self.req_args["response_binding"]:
break
for sso in idp["single_sign_on_service"]: # this does not seem to be correct - response binding is not related to IDP binding
if sso["binding"] == bind:
self.req_args["response_binding"] = bind
break
for arg in ['nameid_format', 'response_binding']:
if not arg in self.req_args:
self.req_args[arg] = ''
class RedirectRequest(Request):
_class = None
_args = {}
_method = 'GET'
_binding = BINDING_HTTP_REDIRECT
def run(self):
info = self._make_request()
if isinstance(info, Response):
return info
_method = info['method']
_loc = ''
for header, value in info['headers']:
if header == 'Location':
_loc = value
break
self.trace.info("redirect.url: {}".format(_loc))
self.conv.events.store(EV_SEND, {'url': _loc, 'method': _method},
sender=self.__class__)
res = self.entity.send(_loc, _method)
self.conv.events.store(EV_HTTP_RESPONSE, res, sender=self.__class__)
self.trace.info("redirect response: {}".format(res.text))
return res
def unpack_form(_str, ver="SAMLRequest"):
SR_STR = "name=\"%s\" value=\"" % ver
RS_STR = 'name="RelayState" value="'
i = _str.find(SR_STR)
i += len(SR_STR)
j = _str.find('"', i)
sr = _str[i:j]
k = _str.find(RS_STR, j)
k += len(RS_STR)
l = _str.find('"', k)
rs = _str[k:l]
return {ver: sr, "RelayState": rs}
def form_post(_dict):
return urlencode(_dict)
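def _unpack_form_example():
    # Minimal sketch (the HTML fragment and values are hypothetical): pull the
    # SAMLRequest and RelayState fields out of a POST-binding form and re-encode
    # them as an application/x-www-form-urlencoded body.
    html = ('<input type="hidden" name="SAMLRequest" value="PHNhbWxw"/>'
            '<input type="hidden" name="RelayState" value="abc123"/>')
    fields = unpack_form(html)  # {'SAMLRequest': 'PHNhbWxw', 'RelayState': 'abc123'}
    return form_post(fields)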
class PostRequest(Request):
_class = None
_args = {}
_method = 'POST'
_binding = BINDING_HTTP_POST
def run(self):
send_args = self._make_request()
if isinstance(send_args, Response):
logger.debug(send_args)
return send_args
_loc = send_args['url']
self.trace.info("post.url: {}".format(_loc))
self.conv.events.store(EV_SEND, send_args, sender=self.__class__)
res = self.entity.send(**send_args)
self.conv.events.store(EV_HTTP_RESPONSE, res, sender=self.__class__)
self.trace.info("post response: {}".format(res.text))
return res
class SoapRequest(Request):
_class = None
_args = {}
_method = 'POST'
_binding = BINDING_SOAP
def run(self):
send_args = self._make_request()
if isinstance(send_args, Response):
return send_args
# _method = info['method']
_loc = send_args['url']
self.trace.info("post.url: {}".format(_loc))
self.conv.events.store(EV_SEND, send_args, sender=self.__class__)
res = self.entity.send(**send_args)
self.conv.events.store(EV_HTTP_RESPONSE, res, sender=self.__class__)
self.trace.info("post response: {}".format(res.text))
return res
# -----------------------------------------------------------------------------
def factory(name):
for fname, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
if name == fname:
return obj
raise Unknown("Couldn't find the operation: '{}'".format(name))
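# Usage sketch: operations are looked up by class name, e.g.
#     op_cls = factory('RedirectRequest')   # returns the RedirectRequest class above
#     factory('NoSuchOperation')            # raises aatest.Unknown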
|
|
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
# Avoid a refcycle if the target function holds an indirect
# reference to the process object (see bpo-30775)
del self._target, self._args, self._kwargs
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
returncode = self._popen.poll()
if returncode is None:
return True
else:
_current_process._children.discard(self)
return False
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, basestring), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) in (int, long):
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit, e:
if not e.args:
exitcode = 1
elif isinstance(e.args[0], (int, long)):
exitcode = int(e.args[0])
else:
sys.stderr.write(str(e.args[0]) + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
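# Usage sketch (not part of the original module; 'some_callable' is a placeholder):
# the API mirrors threading.Thread.
#     p = Process(target=some_callable, args=('arg1',))
#     p.start()
#     p.join()
#     print p.exitcode    # 0 on a clean exit, negative signal number if killed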
|
|
"""
Copyright (c) 2017 Baidu Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import random
import pprint
from operator import mul
def is_terminal(symbol):
"""
A terminal is a single-quoted string literal; otherwise it's a nonterminal.
"""
return len(symbol) >= 2 and symbol[0] == "'" and symbol[-1] == "'"
class RHS:
def __init__(self, items=None, must_bound=False):
assert isinstance(items, list)
self.must_bound = must_bound
self.items = items
self.items_backup = copy.deepcopy(items)
def all_values(self):
"""
Return the all values given the current bindings
"""
return self.items
def value(self):
"""
Return a value.
If must_bound is True, then self.items must only contain one value;
otherwise a value is randomly sampled from the items list.
"""
assert not self.must_bound or len(self.items) == 1, \
"RHS must be bound first"
return random.choice(self.items)
def unbind(self):
"""
Restore the items list.
"""
self.items = copy.deepcopy(self.items_backup)
def set_must_bound(self):
"""
If called, then an RHS object with multiple values must be bound
before value() can be called.
"""
self.must_bound = True
def bind(self, item):
"""
Narrow down the items list to only one element
"""
assert item in self.items, "Invalid RHS item: " + item
self.items = [item]
class CFG:
def __init__(self, string, start_symbol='S'):
self.grammar_str = string
rules = [r for r in string.splitlines() if not r.strip() == ""]
self.start_symbol = start_symbol
self.productions = {}
for r in rules:
self.set_production_rule(r)
def __parse_rule(self, rule_str):
"""
A production rule must be in the form "X -> Y1 | Y2 | ...", where X
is a non-terminal lhs symbol and the rhs contains a list of choices.
Y could be a terminal symbol (indicated by single quotes around), a non-terminal
symbol (literal), or a mixture of the two.
The user can use '-->' instead of '->' to require that the rhs must be
bound before a sentence can be generated.
The output is a tuple (lhs, rhs, must_bound)
"""
separator = "->"
if "-->" in rule_str: ## '-->' indicates a must_bound
separator = "-->"
strs = rule_str.split(separator)
assert len(strs) == 2, "invalid format of the rule: " + rule_str
lhs = strs[0].strip()
assert not is_terminal(lhs), "LHS cannot be a terminal:" + lhs
rhs_items = [i.strip() for i in strs[1].split("|")]
return lhs, rhs_items, separator=="-->"
def show(self):
"""
Print the grammar as a string
"""
pprint.pprint(self.grammar_str)
def bind(self, binding_str):
"""
Bind a production rule. The binding_str should be in the form "X -> Y".
This will bind Y to X.
"""
lhs, rhs_items, _ = self.__parse_rule(binding_str)
assert lhs in self.productions, "No such production rule: " + lhs
assert len(rhs_items) == 1, "ambiguous binding: " + binding_str
self.productions[lhs].bind(rhs_items[0])
def __unbind_all(self):
"""
Unbind the rhs of all production rules
"""
for lhs, rhs in self.productions.iteritems():
rhs.unbind()
def set_start_symbol(self, start_symbol):
assert not is_terminal(start_symbol), "start_symbol cannot be a terminal!"
self.start_symbol = start_symbol
def set_production_rule(self, string):
"""
Add a new rule or modify an existing rule.
"""
lhs, rhs_items, must_bound = self.__parse_rule(string)
self.productions[lhs] = RHS(rhs_items)
if must_bound:
self.productions[lhs].set_must_bound()
self.check_recursion()
def check_recursion(self):
"""
Each node i has three status: not in visited, visited[i]=false,
visited[i]=true
The first means that the node has not been visited;
The second means that the node is a parent of the current node
The third means that the node is a brother of the current node
"""
def _dfs(symbol, visited):
visited[symbol] = False
if symbol in self.productions:
for item in self.productions[symbol].all_values():
visited_backup = copy.deepcopy(visited)
rhs_symbols = item.split()
for s in rhs_symbols:
if not s in visited:
_dfs(s, visited)
else:
assert visited[s], "Recursion in the grammar!"
visited = visited_backup
visited[symbol] = True
_dfs(self.start_symbol, {})
def clear_grammar(self):
self.productions = {}
self.grammar_str = ""
self.start_symbol = ""
def generate(self, start_symbol=None):
"""
Generate a sentence given the grammar and bindings.
If for a production rule, the rhs has multiple unbound values, then a value is
randomly sampled. This will raise errors if a rhs is not bound but its must_bound
is True.
"""
if start_symbol is None:
start_symbol = self.start_symbol
assert not is_terminal(start_symbol), "start_symbol must be a nonterminal"
def _generate(symbol):
if is_terminal(symbol):
return symbol[1:-1] # remove the single quotes
else:
assert symbol in self.productions, "Ungrounded nonterminal: " + symbol
rhs = self.productions[symbol].value()
rhs_symbols = rhs.split()
return " ".join([_generate(s) for s in rhs_symbols])
sentence = _generate(start_symbol)
self.__unbind_all()
return sentence
def generate_all(self, start_symbol=None):
"""
Generate all possible sentences given the grammar and the bindings.
This will use the existing bindings, but will ignore must_bound.
"""
if start_symbol is None:
start_symbol = self.start_symbol
assert not is_terminal(start_symbol), "start_symbol must be a nonterminal"
def _generate(symbols):
assert isinstance(symbols, list)
if len(symbols) == 0:
yield []
else:
for frag1 in _generate_one(symbols[0]):
for frag2 in _generate(symbols[1:]):
yield frag1 + frag2
def _generate_one(symbol):
assert isinstance(symbol, str)
if is_terminal(symbol):
yield [symbol[1:-1]]
else:
assert symbol in self.productions, "Ungrounded nonterminal: " + symbol
for rhs in self.productions[symbol].all_values():
for frag in _generate(rhs.split()):
yield frag
sentences = [" ".join(words) for words in list(_generate_one(start_symbol))]
self.__unbind_all()
return sentences
def total_possible_sentences(self, start_symbol=None):
"""
Count the total number of possible sentences for the grammar.
The total number will be affected by the existing bindings,
but will ignore must_bound.
"""
if not self.productions:
return 0
if start_symbol is None:
start_symbol = self.start_symbol
assert not is_terminal(start_symbol), "start_symbol must be a nonterminal"
def _count(symbol):
if is_terminal(symbol):
return 1
else:
## ignore ungrounded nonterminal
if not symbol in self.productions:
return 0
total = 0
for rhs in self.productions[symbol].all_values():
total += reduce(mul, map(lambda i: _count(i), rhs.split()), 1)
return total
num = _count(start_symbol)
self.__unbind_all()
return num
## test
if __name__ == "__main__":
cfg = CFG("""
S -> 'we' | N 'you'
N --> 'us' | O
O -> 'dead' W
W -> 'sdfxc' | 'xcvxc' 'sdfscx' 'xcvx'
""")
print(cfg.total_possible_sentences())
cfg.bind("N -> 'us'")
print(cfg.generate_all()) ## this will unbind
cfg.bind("S -> N 'you'")
try:
print(cfg.generate()) ## this will raise error because of must_bound
assert False, "This shouldn't happen"
except:
pass
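# Worked count for the grammar above: W has 2 expansions, so O ('dead' W) has
# 1*2 = 2, N ('us' | O) has 1 + 2 = 3, and S ('we' | N 'you') has 1 + 3*1 = 4,
# which is what total_possible_sentences() prints. With N bound to 'us',
# generate_all() yields ['we', 'us you'] and then unbinds, so the final
# generate() fails because N is declared with '-->' (must_bound) and is no
# longer bound.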
|
|
#
# FBrowser.py -- File Browser plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
import stat, time
from ginga.misc.plugins import FBrowserBase
from ginga.misc import Bunch
from ginga.gtkw import gtksel
import gtk
#icon_ext = '.svg'
icon_ext = '.png'
class FBrowser(FBrowserBase.FBrowserBase):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(FBrowser, self).__init__(fv, fitsimage)
self.cell_data_funcs = (self.file_name, self.file_size,
self.file_mode, self.file_last_changed)
self.cell_sort_funcs = []
for hdr, key in self.columns:
self.cell_sort_funcs.append(self._mksrtfnN(key))
icondir = self.fv.iconpath
foldericon = os.path.join(icondir, 'folder'+icon_ext)
self.folderpb = gtksel.pixbuf_new_from_file_at_size(foldericon, 24, 24)
fileicon = os.path.join(icondir, 'file'+icon_ext)
self.filepb = gtksel.pixbuf_new_from_file_at_size(fileicon, 24, 24)
fitsicon = os.path.join(icondir, 'fits'+icon_ext)
self.fitspb = gtksel.pixbuf_new_from_file_at_size(fitsicon, 24, 24)
def build_gui(self, container):
rvbox = gtk.VBox()
sw = gtk.ScrolledWindow()
sw.set_border_width(2)
sw.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_AUTOMATIC)
# create the TreeView
#self.treeview = gtk.TreeView()
self.treeview = MultiDragDropTreeView()
# create the TreeViewColumns to display the data
self.tvcolumn = [None] * len(self.columns)
cellpb = gtk.CellRendererPixbuf()
cellpb.set_padding(2, 0)
header, kwd = self.columns[0]
tvc = gtk.TreeViewColumn(header, cellpb)
tvc.set_resizable(True)
tvc.connect('clicked', self.sort_cb, 0)
tvc.set_clickable(True)
self.tvcolumn[0] = tvc
self.tvcolumn[0].set_cell_data_func(cellpb, self.file_pixbuf)
cell = gtk.CellRendererText()
cell.set_padding(2, 0)
self.tvcolumn[0].pack_start(cell, False)
self.tvcolumn[0].set_cell_data_func(cell, self.file_name)
self.treeview.append_column(self.tvcolumn[0])
for n in range(1, len(self.columns)):
cell = gtk.CellRendererText()
cell.set_padding(2, 0)
header, kwd = self.columns[n]
tvc = gtk.TreeViewColumn(header, cell)
tvc.set_resizable(True)
tvc.connect('clicked', self.sort_cb, n)
tvc.set_clickable(True)
self.tvcolumn[n] = tvc
if n == 1:
cell.set_property('xalign', 1.0)
self.tvcolumn[n].set_cell_data_func(cell, self.cell_data_funcs[n])
self.treeview.append_column(self.tvcolumn[n])
sw.add(self.treeview)
self.treeview.connect('row-activated', self.open_file)
# enable multiple selection
treeselection = self.treeview.get_selection()
treeselection.set_mode(gtk.SELECTION_MULTIPLE)
# enable drag from this widget
toImage = [ ( "text/plain", 0, 0 ) ]
self.treeview.enable_model_drag_source(gtk.gdk.BUTTON1_MASK,
toImage, gtk.gdk.ACTION_COPY)
self.treeview.connect("drag-data-get", self.drag_data_get_cb)
rvbox.pack_start(sw, fill=True, expand=True)
self.entry = gtk.Entry()
rvbox.pack_start(self.entry, fill=True, expand=False)
self.entry.connect('activate', self.browse_cb)
hbox = gtk.HBox(spacing=2)
btn = gtk.Button("Load")
btn.connect('clicked', lambda w: self.load_cb())
hbox.pack_start(btn, fill=False, expand=False)
btn = gtk.Button("Save Image As")
btn.connect('clicked', lambda w: self.save_as_cb())
hbox.pack_start(btn, fill=False, expand=False)
self.entry2 = gtk.Entry()
self.entry2.connect('activate', lambda w: self.save_as_cb())
hbox.pack_start(self.entry2, fill=True, expand=True)
rvbox.pack_start(hbox, fill=True, expand=False)
btns = gtk.HButtonBox()
btns.set_layout(gtk.BUTTONBOX_START)
btns.set_spacing(3)
btn = gtk.Button("Close")
btn.connect('clicked', lambda w: self.close())
btns.add(btn)
btn = gtk.Button("Refresh")
btn.connect('clicked', lambda w: self.refresh())
btns.add(btn)
btn = gtk.Button("Make Thumbs")
btn.connect('clicked', lambda w: self.make_thumbs())
btns.add(btn)
rvbox.pack_start(btns, padding=4, fill=True, expand=False)
cw = container.get_widget()
cw.pack_start(rvbox, padding=0, fill=True, expand=True)
def sort_cb(self, column, idx):
treeview = column.get_tree_view()
model = treeview.get_model()
model.set_sort_column_id(idx, gtk.SORT_ASCENDING)
fn = self.cell_sort_funcs[idx]
model.set_sort_func(idx, fn)
return True
def _mksrtfnN(self, key):
def fn(model, iter1, iter2):
bnch1 = model.get_value(iter1, 0)
bnch2 = model.get_value(iter2, 0)
val1, val2 = bnch1[key], bnch2[key]
if isinstance(val1, str):
val1 = val1.lower()
val2 = val2.lower()
res = cmp(val1, val2)
return res
return fn
def file_pixbuf(self, *args):
column, cell, model, iter = args[:4]
bnch = model.get_value(iter, 0)
if bnch.type == 'dir':
pb = self.folderpb
elif bnch.type == 'fits':
pb = self.fitspb
else:
pb = self.filepb
cell.set_property('pixbuf', pb)
def file_name(self, *args):
column, cell, model, iter = args[:4]
bnch = model.get_value(iter, 0)
cell.set_property('text', bnch.name)
def file_size(self, *args):
column, cell, model, iter = args[:4]
bnch = model.get_value(iter, 0)
cell.set_property('text', str(bnch.st_size))
def file_mode(self, *args):
column, cell, model, iter = args[:4]
bnch = model.get_value(iter, 0)
cell.set_property('text', oct(stat.S_IMODE(bnch.st_mode)))
def file_last_changed(self, *args):
column, cell, model, iter = args[:4]
bnch = model.get_value(iter, 0)
cell.set_property('text', time.ctime(bnch.st_mtime))
def open_file(self, treeview, path, column):
model = treeview.get_model()
iter = model.get_iter(path)
bnch = model.get_value(iter, 0)
path = bnch.path
self.logger.debug("path: %s" % (path))
if path == '..':
curdir, curglob = os.path.split(self.curpath)
path = os.path.join(curdir, path, curglob)
if os.path.isdir(path):
path = os.path.join(path, '*')
self.browse(path)
elif os.path.exists(path):
#self.fv.load_file(path)
uri = "file://%s" % (path)
self.fitsimage.make_callback('drag-drop', [uri])
else:
self.browse(path)
def get_selected_paths(self):
treeselection = self.treeview.get_selection()
model, pathlist = treeselection.get_selected_rows()
paths = []
for path in pathlist:
tree_iter = model.get_iter(path)
bnch = model.get_value(tree_iter, 0)
uri = "file://%s" % (bnch.path)
paths.append(uri)
return paths
def load_cb(self):
paths = self.get_selected_paths()
#self.fv.dragdrop(self.fitsimage, paths)
self.fv.gui_do(self.fitsimage.make_callback, 'drag-drop',
paths)
def drag_data_get_cb(self, treeview, context, selection,
info, timestamp):
paths = self.get_selected_paths()
#selection.set_uris(paths)
selection.set("text/plain", 0, '\n'.join(paths))
def makelisting(self, path):
self.entry.set_text(path)
listmodel = gtk.ListStore(object)
for bnch in self.jumpinfo:
listmodel.append([bnch])
self.treeview.set_fixed_height_mode(False)
self.treeview.set_model(listmodel)
# Hack to get around slow TreeView scrolling with large lists
self.treeview.set_fixed_height_mode(True)
def browse_cb(self, w):
path = w.get_text().strip()
self.browse(path)
def save_as_cb(self):
path = self.entry2.get_text()
if not path.startswith('/'):
path = os.path.join(self.curpath, path)
image = self.fitsimage.get_image()
self.fv.error_wrap(image.save_as_file, path)
def __str__(self):
return 'fbrowser'
class MultiDragDropTreeView(gtk.TreeView):
'''TreeView that captures mouse events to make drag and drop work
properly
See: https://gist.github.com/kevinmehall/278480#file-multiple-selection-dnd-class-py
'''
def __init__(self):
super(MultiDragDropTreeView, self).__init__()
self.connect('button_press_event', self.on_button_press)
self.connect('button_release_event', self.on_button_release)
self.defer_select = False
def on_button_press(self, widget, event):
# Here we intercept mouse clicks on selected items so that we can
# drag multiple items without the click selecting only one
target = self.get_path_at_pos(int(event.x), int(event.y))
if (target
and event.type == gtk.gdk.BUTTON_PRESS
and not (event.state & (gtk.gdk.CONTROL_MASK|gtk.gdk.SHIFT_MASK))
and self.get_selection().path_is_selected(target[0])):
# disable selection
self.get_selection().set_select_function(lambda *ignore: False)
self.defer_select = target[0]
def on_button_release(self, widget, event):
# re-enable selection
self.get_selection().set_select_function(lambda *ignore: True)
target = self.get_path_at_pos(int(event.x), int(event.y))
if (self.defer_select and target
and self.defer_select == target[0]
and not (event.x==0 and event.y==0)): # certain drag and drop
self.set_cursor(target[0], target[1], False)
self.defer_select=False
#END
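# --- Hedged usage sketch (editor addition, not part of the plugin above) ---
# Minimal, standalone illustration of MultiDragDropTreeView, assuming PyGTK 2.x
# is available; the model layout and file names below are purely illustrative.
def _multidrag_demo():
    win = gtk.Window()
    win.connect('destroy', lambda w: gtk.main_quit())
    tv = MultiDragDropTreeView()
    model = gtk.ListStore(str)
    for name in ('a.fits', 'b.fits', 'c.fits'):
        model.append([name])
    tv.set_model(model)
    tv.append_column(gtk.TreeViewColumn('Name', gtk.CellRendererText(), text=0))
    # multiple selection is what makes the press/release interception useful
    tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    win.add(tv)
    win.show_all()
    gtk.main()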
|
|
import theano
import numpy as np
from theano import config
from collections import OrderedDict
import theano.tensor as T
"""
OPTIMIZERS FOR THEANO
"""
def regularize(cost, params, reg_val, reg_type, reg_spec):
"""
Return a theano cost
cost: cost to regularize
params: list of parameters
reg_val: multiplier for regularizer
reg_type: type of regularizer 'l1','l2'
reg_spec: substring filter; only parameters whose name contains it are regularized
"""
l1 = lambda p: T.sum(abs(p))
l2 = lambda p: T.sum(p**2)
rFxn = {}
rFxn['l1']=l1
rFxn['l2']=l2
if reg_type=='l2' or reg_type=='l1':
assert reg_val is not None,'Expecting reg_val to be specified'
print "<< Reg:("+reg_type+') Reg. Val:('+str(reg_val)+') Reg. Spec.:('+reg_spec+')>>'
regularizer= theano.shared(np.asarray(0).astype(config.floatX),name = 'reg_norm', borrow=True)
for p in params:
if reg_spec in p.name:
regularizer += rFxn[reg_type](p)
print '<<<<<< Adding '+reg_type+' regularization for '+p.name+' >>>>>>'
return cost + reg_val*regularizer
else:
return cost
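# --- Hedged sketch (editor addition): how the reg_spec substring filter behaves ---
# Only parameters whose .name contains reg_spec are penalized; here 'W' matches
# the weight but not the bias. The names and shapes are illustrative only.
def _regularize_example():
    W = theano.shared(np.ones((2, 2)).astype(config.floatX), name='W_out')
    b = theano.shared(np.zeros((2,)).astype(config.floatX), name='b_out')
    cost = T.sum(W) + T.sum(b)
    # b_out is left unregularized because 'W' is not a substring of 'b_out'
    return regularize(cost, [W, b], reg_val=1e-4, reg_type='l2', reg_spec='W')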
def normalize(grads, grad_norm):
"""
grads: list of gradients
grad_norm : None (or positive value)
returns: gradients rescaled to satisfy norm constraints
"""
#Check if we're clipping gradients
if grad_norm is not None:
assert grad_norm > 0, 'Must specify a positive value to normalize to'
print '<<<<<< Normalizing Gradients to have norm (',grad_norm,') >>>>>>'
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(T.switch(g2 > (grad_norm**2), g/T.sqrt(g2)*grad_norm, g))
return new_grads
else:
return grads
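# --- Hedged sketch (editor addition): the same global-norm clipping in plain numpy ---
# Mirrors what normalize() builds symbolically: if the combined L2 norm of all
# gradients exceeds grad_norm, every gradient is scaled by grad_norm / ||g||.
def _clip_by_global_norm_np(grads_np, grad_norm):
    total = np.sqrt(sum((g ** 2).sum() for g in grads_np))
    if total > grad_norm:
        return [g * (grad_norm / total) for g in grads_np]
    return grads_np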
def rescale(grads, divide_grad):
"""
grads : list of gradients
divide_grad : scalar or theano variable
returns: gradients divided by provided variable
"""
if divide_grad is not None:
print '<<<<<< Rescaling Gradients >>>>>>'
new_grads = []
for g in grads:
new_grads.append(g/divide_grad)
return new_grads
else:
return grads
def adam(cost, params, lr=0.001, b1=0.1, b2=0.001, e=1e-8, opt_params = None,
grad_range= None, #Whether or not you would like to specify a range for grads
grad_norm = None, #Clip gradients using normalization
reg_type = None,# Can be 'l1' or 'l2' or ''
reg_value = None, #Specify the multiplier for the regularization type
reg_spec = 'DOESNOTMATCHANYTHING', #Restrict which parameters are regularized; set to '' to regularize all
divide_grad=None, #Rescale the gradient by batch size
optsuffix = '', #Suffix appended to the optimizer state variable names, so separate optimizer states can be kept for the same parameters
grad_noise= 0., rng = None #Add gradient noise using rng
):
"""
ADAM Optimizer
cost (to be minimized)
params (list of parameters to take gradients with respect to)
.... parameters specific to the optimization ...
opt_params (if available, used to initialize the optimizer state variables)
"""
updates = []
regularized_cost = regularize(cost, params, reg_value, reg_type, reg_spec)
grads = T.grad(regularized_cost, params)
grads = rescale(grads, divide_grad)
grads = normalize(grads, grad_norm)
def getName(pname, suffix = optsuffix):
return 'opt_'+pname+'_'+suffix
if opt_params is None:
opt_params=OrderedDict()
#Track the optimization variable
vname = getName('i')
#Create a new shared variable if opt_params is empty or if you cant find the variable name
if vname not in opt_params:
i = theano.shared(np.asarray(0).astype(config.floatX), name =vname, borrow=True)
opt_params[vname] = i
else:
i = opt_params[vname]
#No need to reload these theano variables
g_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'g_norm',borrow=True)
p_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'p_norm',borrow=True)
opt_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'opt_norm',borrow=True)
#Initialization for ADAM
i_t = i + 1.
#b1=0.1, b2=0.001
fix1 = 1. - (1. - b1)**i_t
fix2 = 1. - (1. - b2)**i_t
lr_t = lr * (T.sqrt(fix2) / fix1)
if grad_noise>0:
print ' Adding gradient noise '
frac = grad_noise / (1+i_t)**0.55
grads = [g+rng.normal(g.shape)*frac for g in grads]
for p, g in zip(params, grads):
if grad_range is not None:
print '<<<<<< ADAM: Truncating Gradients in Range +-(',grad_range,') >>>>>>'
g = T.clip(g,-grad_range, grad_range)
vname_m = getName('m+'+p.name)
vname_v = getName('v+'+p.name)
#Create a new shared variable if opt_params is empty or if you cant find the variable name
if vname_m not in opt_params:
m = theano.shared(p.get_value() * 0.,name = vname_m,borrow=True)
v = theano.shared(p.get_value() * 0.,name = vname_v,borrow=True)
opt_params[vname_m] = m
opt_params[vname_v] = v
else:
m = opt_params[vname_m]
v = opt_params[vname_v]
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (T.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
#Update norms
g_norm += (g**2).sum()
p_norm += (p**2).sum()
opt_norm+=(m**2).sum() + (v**2).sum()
updates.append((i, i_t))
return updates, [T.sqrt(p_norm), T.sqrt(g_norm), T.sqrt(opt_norm), regularized_cost], opt_params
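# --- Hedged usage sketch (editor addition): wiring adam() into a training function ---
# Uses a toy quadratic loss over a single shared vector; the variable names and
# shapes are illustrative only and not taken from any particular model.
def _adam_usage_example():
    x = T.vector('x')
    w = theano.shared(np.zeros(3, dtype=config.floatX), name='W_w')
    loss = T.sum((x * w - 1.) ** 2)
    updates, norms, opt_state = adam(loss, [w], lr=1e-3)
    # norms = [||params||, ||grads||, ||optimizer state||, regularized cost]
    train = theano.function([x], loss, updates=updates)
    return train, opt_state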
def adamNew(cost, params, lr=0.001, b1=0.9, b2=0.999, e=1e-8, opt_params = None, gamma=1.-1e-8,
grad_range= None, #Whether or not you would like to specify a range for grads
grad_norm = None, #Clip gradients using normalization
reg_type = None,# Can be 'l1' or 'l2' or ''
reg_value = None, #Specify the multiplier for the regularization type
reg_spec = 'DOESNOTMATCHANYTHING', #Restrict which parameters are regularized; set to '' to regularize all
divide_grad=None, #Rescale the gradient by batch size
optsuffix = '', #Suffix appended to the optimizer state variable names, so separate optimizer states can be kept for the same parameters
grad_noise= 0., rng = None #Add gradient noise using rng
):
"""
ADAM Optimizer
cost (to be minimized)
params (list of parameters to take gradients with respect to)
.... parameters specific to the optimization ...
opt_params (if available, used to initialize the optimizer state variables)
"""
updates = []
regularized_cost = regularize(cost, params, reg_value, reg_type, reg_spec)
grads = T.grad(regularized_cost, params)
grads = rescale(grads, divide_grad)
grads = normalize(grads, grad_norm)
def getName(pname, suffix = optsuffix):
return 'opt_'+pname+'_'+suffix
if opt_params is None:
opt_params=OrderedDict()
#Track the optimization variable
vname = getName('i')
#Create a new shared variable if opt_params is empty or if you cant find the variable name
if vname not in opt_params:
i = theano.shared(np.asarray(0).astype(config.floatX), name =vname, borrow=True)
opt_params[vname] = i
else:
i = opt_params[vname]
#No need to reload these theano variables
g_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'g_norm',borrow=True)
p_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'p_norm',borrow=True)
opt_norm = theano.shared(np.asarray(0).astype(config.floatX),name = 'opt_norm',borrow=True)
#Initialization for ADAM
i_t = i + 1.
#b1=0.9, b2=0.999
b1_t = b1*gamma**(i_t-1)
if grad_noise>0:
print ' Adding gradient noise '
frac = grad_noise / (1+i_t)**0.55
grads = [g+rng.normal(g.shape)*frac for g in grads]
for p, g in zip(params, grads):
if grad_range is not None:
print '<<<<<< ADAM: Truncating Gradients in Range +-(',grad_range,') >>>>>>'
g = T.clip(g,-grad_range, grad_range)
vname_m = getName('m+'+p.name)
vname_v = getName('v+'+p.name)
#Create a new shared variable if opt_params is empty or if you cant find the variable name
if vname_m not in opt_params:
m = theano.shared(p.get_value() * 0.,name = vname_m,borrow=True)
v = theano.shared(p.get_value() * 0.,name = vname_v,borrow=True)
opt_params[vname_m] = m
opt_params[vname_v] = v
else:
m = opt_params[vname_m]
v = opt_params[vname_v]
#Update ADAM parameters
m_t = b1_t*m + (1 - b1_t)*g
v_t = b2*v + (1 - b2)*g**2
m_hat = m_t / (1-b1**i_t)
v_hat = v_t / (1-b2**i_t)
p_t = p - (lr* m_hat) / (T.sqrt(v_hat) + e)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
#Update norms
g_norm += (g**2).sum()
p_norm += (p**2).sum()
opt_norm += (m**2).sum() + (v**2).sum()
updates.append((i, i_t))
return updates, [T.sqrt(p_norm), T.sqrt(g_norm), T.sqrt(opt_norm), regularized_cost], opt_params
|
|
#!/usr/bin/python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, os.path, textwrap, argparse, sys, shlex, subprocess, tempfile, re
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
def get_flags():
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('flags'):
return re.sub(r'^flags\s+: ', '', line).split()
def add_tristate(arg_parser, name, dest, help):
arg_parser.add_argument('--enable-' + name, dest = dest, action = 'store_true', default = None,
help = 'Enable ' + help)
arg_parser.add_argument('--disable-' + name, dest = dest, action = 'store_false', default = None,
help = 'Disable ' + help)
def apply_tristate(var, test, note, missing):
if (var is None) or var:
if test():
return True
elif var == True:
print(missing)
sys.exit(1)
else:
print(note)
return False
return False
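# Hedged illustration (editor addition) of the tristate semantics above: with
# var=None the probe is tried and failure only prints `note`; with var=True a
# failing probe aborts configure; with var=False the probe is skipped entirely.
def _tristate_example():
    return apply_tristate(None, test = lambda: False,
                          note = 'Note: feature missing, continuing without it.',
                          missing = 'Error: feature required but missing.')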
#
# dpdk_cflags - fetch the DPDK specific CFLAGS
#
# Run a simple makefile that "includes" the DPDK main makefile and prints the
# MACHINE_CFLAGS value
#
def dpdk_cflags (dpdk_target):
with tempfile.NamedTemporaryFile() as sfile:
dpdk_target = os.path.abspath(dpdk_target)
dpdk_target = re.sub(r'\/+$', '', dpdk_target)
dpdk_sdk_path = os.path.dirname(dpdk_target)
dpdk_target_name = os.path.basename(dpdk_target)
dpdk_arch = dpdk_target_name.split('-')[0]
if args.dpdk:
dpdk_sdk_path = 'dpdk'
dpdk_target = os.getcwd() + '/build/dpdk'
dpdk_target_name = 'x86_64-{}-linuxapp-gcc'.format(dpdk_machine)
dpdk_arch = 'x86_64'
sfile.file.write(bytes('include ' + dpdk_sdk_path + '/mk/rte.vars.mk' + "\n", 'utf-8'))
sfile.file.write(bytes('all:' + "\n\t", 'utf-8'))
sfile.file.write(bytes('@echo $(MACHINE_CFLAGS)' + "\n", 'utf-8'))
sfile.file.flush()
dpdk_cflags = subprocess.check_output(['make', '--no-print-directory',
'-f', sfile.name,
'RTE_SDK=' + dpdk_sdk_path,
'RTE_OUTPUT=' + dpdk_target,
'RTE_TARGET=' + dpdk_target_name,
'RTE_SDK_BIN=' + dpdk_target,
'RTE_ARCH=' + dpdk_arch])
dpdk_cflags_str = dpdk_cflags.decode('utf-8')
dpdk_cflags_str = re.sub(r'\n+$', '', dpdk_cflags_str)
dpdk_cflags_final = ''
return dpdk_cflags_str
def try_compile(compiler, source = '', flags = []):
with tempfile.NamedTemporaryFile() as sfile:
sfile.file.write(bytes(source, 'utf-8'))
sfile.file.flush()
return subprocess.call([compiler, '-x', 'c++', '-o', '/dev/null', '-c', sfile.name] + flags,
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL) == 0
def warning_supported(warning, compiler):
# gcc ignores -Wno-x even if it is not supported
adjusted = re.sub('^-Wno-', '-W', warning)
return try_compile(flags = [adjusted], compiler = compiler)
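# Example (editor addition): warning_supported(warning = '-Wno-mismatched-tags',
# compiler = 'g++') probes '-Wmismatched-tags' instead, because gcc accepts any
# unknown -Wno-* flag silently and would make every warning look supported.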
def debug_flag(compiler):
src_with_auto = textwrap.dedent('''\
template <typename T>
struct x { auto f() {} };
x<int> a;
''')
if try_compile(source = src_with_auto, flags = ['-g', '-std=gnu++1y'], compiler = compiler):
return '-g'
else:
print('Note: debug information disabled; upgrade your compiler')
return ''
modes = {
'debug': {
'sanitize': '-fsanitize=address -fsanitize=leak -fsanitize=undefined',
'sanitize_libs': '-lubsan -lasan',
'opt': '-O0 -DDEBUG -DDEBUG_SHARED_PTR -DDEFAULT_ALLOCATOR',
'libs': '',
},
'release': {
'sanitize': '',
'sanitize_libs': '',
'opt': '-O2',
'libs': '',
},
}
tests = [
'tests/fileiotest',
'tests/directory_test',
'tests/linecount',
'tests/echotest',
'tests/l3_test',
'tests/ip_test',
'tests/timertest',
'tests/tcp_test',
'tests/futures_test',
'tests/foreign_ptr_test',
'tests/smp_test',
'tests/thread_test',
'tests/thread_context_switch',
'tests/udp_server',
'tests/udp_client',
'tests/blkdiscard_test',
'tests/sstring_test',
'tests/httpd',
'tests/memcached/test_ascii_parser',
'tests/tcp_server',
'tests/tcp_client',
'tests/allocator_test',
'tests/output_stream_test',
'tests/udp_zero_copy',
'tests/shared_ptr_test',
'tests/slab_test',
'tests/fstream_test',
'tests/distributed_test',
'tests/rpc',
'tests/semaphore_test',
'tests/packet_test',
]
apps = [
'apps/httpd/httpd',
'apps/seawreck/seawreck',
'apps/seastar/seastar',
'apps/memcached/memcached',
]
all_artifacts = apps + tests + ['libseastar.a', 'seastar.pc']
arg_parser = argparse.ArgumentParser('Configure seastar')
arg_parser.add_argument('--static', dest = 'static', action = 'store_const', default = '',
const = '-static',
help = 'Static link (useful for running on hosts outside the build environment)')
arg_parser.add_argument('--pie', dest = 'pie', action = 'store_true',
help = 'Build position-independent executable (PIE)')
arg_parser.add_argument('--so', dest = 'so', action = 'store_true',
help = 'Build shared object (SO) instead of executable')
arg_parser.add_argument('--mode', action='store', choices=list(modes.keys()) + ['all'], default='all')
arg_parser.add_argument('--with', dest='artifacts', action='append', choices=all_artifacts, default=[])
arg_parser.add_argument('--cflags', action = 'store', dest = 'user_cflags', default = '',
help = 'Extra flags for the C++ compiler')
arg_parser.add_argument('--ldflags', action = 'store', dest = 'user_ldflags', default = '',
help = 'Extra flags for the linker')
arg_parser.add_argument('--compiler', action = 'store', dest = 'cxx', default = 'g++',
help = 'C++ compiler path')
arg_parser.add_argument('--with-osv', action = 'store', dest = 'with_osv', default = '',
help = 'Shortcut for compile for OSv')
arg_parser.add_argument('--enable-dpdk', action = 'store_true', dest = 'dpdk', default = False,
help = 'Enable dpdk (from included dpdk sources)')
arg_parser.add_argument('--dpdk-target', action = 'store', dest = 'dpdk_target', default = '',
help = 'Path to DPDK SDK target location (e.g. <DPDK SDK dir>/x86_64-native-linuxapp-gcc)')
arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', type = int, default = 1,
help = 'Enable(1)/disable(0) compiler debug information generation')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
libnet = [
'net/proxy.cc',
'net/virtio.cc',
'net/dpdk.cc',
'net/ip.cc',
'net/ethernet.cc',
'net/arp.cc',
'net/native-stack.cc',
'net/ip_checksum.cc',
'net/udp.cc',
'net/tcp.cc',
'net/dhcp.cc',
]
core = [
'core/reactor.cc',
'core/fstream.cc',
'core/posix.cc',
'core/memory.cc',
'core/resource.cc',
'core/scollectd.cc',
'core/app-template.cc',
'core/thread.cc',
'core/dpdk_rte.cc',
'util/conversions.cc',
'net/packet.cc',
'net/posix-stack.cc',
'net/net.cc',
'rpc/rpc.cc',
]
http = ['http/transformers.cc',
'http/json_path.cc',
'http/file_handler.cc',
'http/common.cc',
'http/routes.cc',
'json/json_elements.cc',
'json/formatter.cc',
'http/matcher.cc',
'http/mime_types.cc',
'http/httpd.cc',
'http/reply.cc',
'http/request_parser.rl',
'http/api_docs.cc',
]
boost_test_lib = [
'tests/test-utils.cc',
'tests/test_runner.cc',
]
defines = []
libs = '-laio -lboost_program_options -lboost_system -lstdc++ -lm -lboost_unit_test_framework -lboost_thread -lcryptopp -lrt'
hwloc_libs = '-lhwloc -lnuma -lpciaccess -lxml2 -lz'
xen_used = False
def have_xen():
source = '#include <stdint.h>\n'
source += '#include <xen/xen.h>\n'
source += '#include <xen/sys/evtchn.h>\n'
source += '#include <xen/sys/gntdev.h>\n'
source += '#include <xen/sys/gntalloc.h>\n'
return try_compile(compiler = args.cxx, source = source)
if apply_tristate(args.xen, test = have_xen,
note = 'Note: xen-devel not installed. No Xen support.',
missing = 'Error: required package xen-devel not installed.'):
libs += ' -lxenstore'
defines.append("HAVE_XEN")
libnet += [ 'net/xenfront.cc' ]
core += [
'core/xen/xenstore.cc',
'core/xen/gntalloc.cc',
'core/xen/evtchn.cc',
]
xen_used=True
if xen_used and args.dpdk_target:
print("Error: only xen or dpdk can be used, not both.")
sys.exit(1)
memcache_base = [
'apps/memcached/ascii.rl'
] + libnet + core
deps = {
'libseastar.a' : core + libnet + http,
'seastar.pc': [],
'apps/seastar/seastar': ['apps/seastar/main.cc'] + core,
'apps/httpd/httpd': ['apps/httpd/demo.json', 'apps/httpd/main.cc'] + http + libnet + core,
'apps/memcached/memcached': ['apps/memcached/memcache.cc'] + memcache_base,
'tests/memcached/test_ascii_parser': ['tests/memcached/test_ascii_parser.cc'] + memcache_base + boost_test_lib,
'tests/fileiotest': ['tests/fileiotest.cc'] + core + boost_test_lib,
'tests/directory_test': ['tests/directory_test.cc'] + core,
'tests/linecount': ['tests/linecount.cc'] + core,
'tests/echotest': ['tests/echotest.cc'] + core + libnet,
'tests/l3_test': ['tests/l3_test.cc'] + core + libnet,
'tests/ip_test': ['tests/ip_test.cc'] + core + libnet,
'tests/tcp_test': ['tests/tcp_test.cc'] + core + libnet,
'tests/timertest': ['tests/timertest.cc'] + core,
'tests/futures_test': ['tests/futures_test.cc'] + core + boost_test_lib,
'tests/foreign_ptr_test': ['tests/foreign_ptr_test.cc'] + core + boost_test_lib,
'tests/semaphore_test': ['tests/semaphore_test.cc'] + core + boost_test_lib,
'tests/smp_test': ['tests/smp_test.cc'] + core,
'tests/thread_test': ['tests/thread_test.cc'] + core + boost_test_lib,
'tests/thread_context_switch': ['tests/thread_context_switch.cc'] + core,
'tests/udp_server': ['tests/udp_server.cc'] + core + libnet,
'tests/udp_client': ['tests/udp_client.cc'] + core + libnet,
'tests/tcp_server': ['tests/tcp_server.cc'] + core + libnet,
'tests/tcp_client': ['tests/tcp_client.cc'] + core + libnet,
'apps/seawreck/seawreck': ['apps/seawreck/seawreck.cc', 'apps/seawreck/http_response_parser.rl'] + core + libnet,
'tests/blkdiscard_test': ['tests/blkdiscard_test.cc'] + core,
'tests/sstring_test': ['tests/sstring_test.cc'] + core,
'tests/httpd': ['tests/httpd.cc'] + http + core + boost_test_lib,
'tests/allocator_test': ['tests/allocator_test.cc', 'core/memory.cc', 'core/posix.cc'],
'tests/output_stream_test': ['tests/output_stream_test.cc'] + core + libnet + boost_test_lib,
'tests/udp_zero_copy': ['tests/udp_zero_copy.cc'] + core + libnet,
'tests/shared_ptr_test': ['tests/shared_ptr_test.cc'] + core,
'tests/slab_test': ['tests/slab_test.cc'] + core,
'tests/fstream_test': ['tests/fstream_test.cc'] + core + boost_test_lib,
'tests/distributed_test': ['tests/distributed_test.cc'] + core,
'tests/rpc': ['tests/rpc.cc'] + core + libnet,
'tests/packet_test': ['tests/packet_test.cc'] + core + libnet,
}
warnings = [
'-Wno-mismatched-tags', # clang-only
]
# The "--with-osv=<path>" parameter is a shortcut for a bunch of other
# settings:
if args.with_osv:
args.so = True
args.hwloc = False
args.user_cflags = (args.user_cflags +
' -DDEFAULT_ALLOCATOR -fvisibility=default -DHAVE_OSV -I' +
args.with_osv + ' -I' + args.with_osv + '/include -I' +
args.with_osv + '/arch/x64')
dpdk_arch_xlat = {
'native': 'native',
'nehalem': 'nhm',
'westmere': 'wsm',
'sandybridge': 'snb',
'ivybridge': 'ivb',
}
dpdk_machine = 'native'
if args.dpdk:
if not os.path.exists('dpdk') or not os.listdir('dpdk'):
raise Exception('--enable-dpdk: dpdk/ is empty. Run "git submodule update --init".')
cflags = args.user_cflags.split()
dpdk_machine = ([dpdk_arch_xlat[cflag[7:]]
for cflag in cflags
if cflag.startswith('-march')] or ['native'])[0]
subprocess.check_call('make -C dpdk RTE_OUTPUT=$PWD/build/dpdk/ config T=x86_64-native-linuxapp-gcc'.format(
dpdk_machine=dpdk_machine),
shell = True)
# adjust configuration to taste
dotconfig = 'build/dpdk/.config'
lines = open(dotconfig, encoding='UTF-8').readlines()
def update(lines, vars):
ret = []
for line in lines:
for var, val in vars.items():
if line.startswith(var + '='):
line = var + '=' + val + '\n'
ret.append(line)
return ret
lines = update(lines, {'CONFIG_RTE_LIBRTE_PMD_BOND': 'n',
'CONFIG_RTE_MBUF_SCATTER_GATHER': 'n',
'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
'CONFIG_RTE_APP_TEST': 'n',
'CONFIG_RTE_TEST_PMD': 'n',
'CONFIG_RTE_MBUF_REFCNT_ATOMIC': 'n',
'CONFIG_RTE_MAX_MEMSEG': '8192',
'CONFIG_RTE_EAL_IGB_UIO': 'n',
'CONFIG_RTE_LIBRTE_KNI': 'n',
})
lines += ['CONFIG_RTE_MACHINE={}\n'.format(dpdk_machine)]
open(dotconfig, 'w', encoding='UTF-8').writelines(lines)
args.dpdk_target = os.getcwd() + '/build/dpdk'
if args.dpdk_target:
args.user_cflags = (args.user_cflags +
' -DHAVE_DPDK -I' + args.dpdk_target + '/include ' +
dpdk_cflags(args.dpdk_target) +
' -Wno-error=literal-suffix -Wno-literal-suffix -Wno-invalid-offsetof')
libs += (' -L' + args.dpdk_target + '/lib ')
if args.with_osv:
libs += '-lintel_dpdk -lrt -lm -ldl'
else:
libs += '-Wl,--whole-archive -lrte_pmd_vmxnet3_uio -lrte_pmd_i40e -lrte_pmd_ixgbe -lrte_pmd_e1000 -lrte_pmd_ring -Wl,--no-whole-archive -lrte_distributor -lrte_pipeline -lrte_table -lrte_port -lrte_timer -lrte_hash -lrte_lpm -lrte_power -lrte_acl -lrte_meter -lrte_sched -lrte_kvargs -lrte_mbuf -lethdev -lrte_eal -lrte_malloc -lrte_mempool -lrte_ring -lrte_cmdline -lrte_cfgfile -lrt -lm -ldl'
warnings = [w
for w in warnings
if warning_supported(warning = w, compiler = args.cxx)]
warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
def have_hwloc():
return try_compile(compiler = args.cxx, source = '#include <hwloc.h>\n#include <numa.h>')
if apply_tristate(args.hwloc, test = have_hwloc,
note = 'Note: hwloc-devel/numactl-devel not installed. No NUMA support.',
missing = 'Error: required packages hwloc-devel/numactl-devel not installed.'):
libs += ' ' + hwloc_libs
defines.append('HAVE_HWLOC')
defines.append('HAVE_NUMA')
if args.so:
args.pie = '-shared'
args.fpie = '-fpic'
elif args.pie:
args.pie = '-pie'
args.fpie = '-fpie'
else:
args.pie = ''
args.fpie = ''
defines = ' '.join(['-D' + d for d in defines])
globals().update(vars(args))
total_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
link_pool_depth = max(int(total_memory / 7e9), 1)
build_modes = modes if args.mode == 'all' else [args.mode]
build_artifacts = all_artifacts if not args.artifacts else args.artifacts
dpdk_sources = []
if args.dpdk:
for root, dirs, files in os.walk('dpdk'):
dpdk_sources += [os.path.join(root, file)
for file in files
if file.endswith('.h') or file.endswith('.c')]
dpdk_sources = ' '.join(dpdk_sources)
outdir = 'build'
buildfile = 'build.ninja'
os.makedirs(outdir, exist_ok = True)
do_sanitize = True
if args.static:
do_sanitize = False
with open(buildfile, 'w') as f:
dpdk_deps = ''
if args.dpdk:
# fake dependencies on dpdk, so that it is built before anything else
dpdk_deps = ' {dpdk_target}/include/rte_eal.h {dpdk_target}/lib/librte_eal.a'.format(dpdk_target=args.dpdk_target)
f.write(textwrap.dedent('''\
configure_args = {configure_args}
builddir = {outdir}
cxx = {cxx}
# we disable _FORTIFY_SOURCE because it generates false positives with longjmp() (core/thread.cc)
cxxflags = -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I. -U_FORTIFY_SOURCE {user_cflags} {warnings} {defines}
ldflags = {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags}
libs = {libs}
pool link_pool
depth = {link_pool_depth}
rule ragel
command = ragel -G2 -o $out $in
description = RAGEL $out
rule gen
command = echo -e $text > $out
description = GEN $out
rule swagger
command = json/json2code.py -f $in -o $out
description = SWAGGER $out
''').format(**globals()))
if args.dpdk:
f.write(textwrap.dedent('''\
rule dpdkmake
command = make -C build/dpdk
build {dpdk_deps} : dpdkmake {dpdk_sources}
''').format(**globals()))
for mode in build_modes:
modeval = modes[mode]
if modeval['sanitize'] and not do_sanitize:
print('Note: --static disables debug mode sanitizers')
modeval['sanitize'] = ''
modeval['sanitize_libs'] = ''
f.write(textwrap.dedent('''\
cxxflags_{mode} = {sanitize} {opt} -I $builddir/{mode}/gen
libs_{mode} = {libs} {sanitize_libs}
rule cxx.{mode}
command = $cxx -MMD -MT $out -MF $out.d $cxxflags $cxxflags_{mode} -c -o $out $in
description = CXX $out
depfile = $out.d
rule link.{mode}
command = $cxx $cxxflags_{mode} $ldflags -o $out $in $libs $libs_{mode}
description = LINK $out
pool = link_pool
rule link_stripped.{mode}
command = $cxx $cxxflags_{mode} -s $ldflags -o $out $in $libs $libs_{mode}
description = LINK (stripped) $out
pool = link_pool
rule ar.{mode}
command = rm -f $out; ar cr $out $in; ranlib $out
description = AR $out
''').format(mode = mode, **modeval))
f.write('build {mode}: phony {artifacts}\n'.format(mode = mode,
artifacts = str.join(' ', ('$builddir/' + mode + '/' + x for x in build_artifacts))))
compiles = {}
ragels = {}
swaggers = {}
for binary in build_artifacts:
srcs = deps[binary]
objs = ['$builddir/' + mode + '/' + src.replace('.cc', '.o')
for src in srcs
if src.endswith('.cc')]
if binary.endswith('.pc'):
vars = modeval.copy()
vars.update(globals())
pc = textwrap.dedent('''\
Name: Seastar
URL: http://seastar-project.org/
Description: Advanced C++ framework for high-performance server applications on modern hardware.
Version: 1.0
Libs: -L{srcdir}/{builddir} -Wl,--whole-archive -lseastar -Wl,--no-whole-archive {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags} {libs} {sanitize_libs}
Cflags: -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I{srcdir} -I{srcdir}/{builddir}/gen {user_cflags} {warnings} {defines} {sanitize} {opt}
''').format(builddir = 'build/' + mode, srcdir = os.getcwd(), **vars)
f.write('build $builddir/{}/{}: gen\n text = {}\n'.format(mode, binary, repr(pc)))
elif binary.endswith('.a'):
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
else:
if binary.startswith('tests/'):
# Our code's debugging information is huge, and multiplied
# by many tests yields ridiculous amounts of disk space.
# So we strip the tests by default; The user can very
# quickly re-link the test unstripped by adding a "_g"
# to the test name, e.g., "ninja build/release/testname_g"
f.write('build $builddir/{}/{}: link_stripped.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
f.write('build $builddir/{}/{}_g: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
else:
f.write('build $builddir/{}/{}: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
for src in srcs:
if src.endswith('.cc'):
obj = '$builddir/' + mode + '/' + src.replace('.cc', '.o')
compiles[obj] = src
elif src.endswith('.rl'):
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
ragels[hh] = src
elif src.endswith('.json'):
hh = '$builddir/' + mode + '/gen/' + src + '.hh'
swaggers[hh] = src
else:
raise Exception('No rule for ' + src)
for obj in compiles:
src = compiles[obj]
gen_headers = list(ragels.keys()) + list(swaggers.keys())
f.write('build {}: cxx.{} {} || {} \n'.format(obj, mode, src, ' '.join(gen_headers) + dpdk_deps))
for hh in ragels:
src = ragels[hh]
f.write('build {}: ragel {}\n'.format(hh, src))
for hh in swaggers:
src = swaggers[hh]
f.write('build {}: swagger {}\n'.format(hh,src))
f.write(textwrap.dedent('''\
rule configure
command = python3 configure.py $configure_args
generator = 1
build build.ninja: configure | configure.py
rule cscope
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
description = CSCOPE
build cscope: cscope
default {modes_list}
''').format(modes_list = ' '.join(build_modes), **globals()))
|
|
# Copyright (C) 2015 Noa-Emil Nissinen (4shadoww)
# Import python modules
import sys
import os
import imp
import traceback
import curses
import time
import importlib
import glob
# Import getpath for lib path
from core import getpath
# Append lib path
sys.path.append(getpath.lib())
# Import core modules
from core import helptable
from core import helpin
from core import info
from core import colors
from core import moduleop
from prettytable import PrettyTable
import core.cowsay
from core import dsag
import core.matrix
import core.touchingsky
from core.hftest import check_module
from core import mscop
from core import value_holder
from core import moddbparser
from core.messages import *
from core.apistatus import *
# Import exceptions
from core.exceptions import UnknownCommand
from core.exceptions import ModuleNotFound
from core.exceptions import VariableError
class Cmethods:
# Module manager object
mm = None
modadd = None
# Init
def __init__(self, mmi):
self.mm = mmi
# Module custom commands
def mcu(self, command):
try:
if command[0] in self.modadd.customcommands.keys():
call = getattr(self.modadd, command[0])
try:
return call(command[1:])
except Exception as e:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
raise UnknownCommand("unknown command")
except AttributeError:
raise UnknownCommand("unknown command")
# Built in commands
def exit(self, args):
if self.mm.module_loaded == 1:
self.mm.module_loaded = 0
self.mm.module_name = ""
else:
sys.exit()
def clear(self, args):
if len(args) != 0 and args[0] == "tmp":
mscop.clear_tmp()
else:
sys.stderr.write("\x1b[2J\x1b[H")
def cl(self, args):
os.system(' '.join(args))
def help(self, args):
if self.mm.module_loaded == 0:
print(helptable.generate_table(helpin.commands))
else:
try:
print(helptable.generate_mtable(helpin.mcommands, self.modadd.customcommands))
except AttributeError:
print(helptable.generate_table(helpin.mcommands))
try:
print('\n',self.modadd.help_notes,'\n')
except AttributeError:
pass
def version(self, args):
if self.mm.module_loaded == 1:
try:
print(self.modadd.conf["name"]+" "+self.modadd.conf["version"])
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
print("Hakku Framework " + info.version + " " + info.codename)
def ifconfig(self, args):
os.system("ifconfig"+" "+' '.join(args))
def scan(self, args):
network_scanner = importlib.import_module("core.network_scanner")
network_scanner.scan()
del network_scanner
def about(self, args):
if self.mm.module_loaded == 1:
try:
t = PrettyTable([self.modadd.conf["name"]+" "+self.modadd.conf["version"], ""])
t.add_row(["",""])
t.align = 'l'
t.valign = 'm'
t.border = False
t.add_row(["author:", self.modadd.conf["author"]])
t.add_row(["github:", self.modadd.conf["github"]])
t.add_row(["email:", self.modadd.conf["email"]])
t.add_row(["description:", self.modadd.conf["shortdesc"]])
t.add_row(["initial date:", self.modadd.conf["initdate"]])
t.add_row(["last modification:", self.modadd.conf["lastmod"]])
if self.modadd.conf["apisupport"] == True:
support = "supported"
else:
support = "not supported"
t.add_row(["api support:", support])
try:
self.modadd.conf["dependencies"]
deps = ""
i = 0
for dep in self.modadd.conf["dependencies"]:
i += 1
if i == len(self.modadd.conf["dependencies"]):
deps += dep
else:
deps += dep+", "
t.add_row(["dependencies:", deps])
except KeyError:
pass
print(t)
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
print(info.about)
def changelog(self, args):
if self.mm.module_loaded == 1:
try:
print(self.modadd.changelog)
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
try:
f = open('changelog', 'r')
file_contents = f.read()
print (file_contents)
f.close()
except IOError:
print_error("changelog file not found (have you removed it?)")
def use(self, args):
init = False
if "modules."+args[0] not in sys.modules:
init = True
if self.mm.module_loaded == 0:
try:
self.modadd = importlib.import_module("modules."+args[0])
self.mm.module_loaded = 1
self.mm.set_name(self.modadd.conf["name"])
try:
print(self.modadd.conf["message"])
except KeyError:
pass
try:
if self.modadd.conf["outdated"] == 1:
print_warning("this module is outdated and might not be working")
except KeyError:
pass
try:
if self.modadd.conf["needroot"] == 1:
if not os.geteuid() == 0:
print_warning("this module requires root permissions for full functionality!")
except KeyError:
pass
if init == True:
try:
self.modadd.init()
except AttributeError:
pass
except ImportError:
print(colors.red + "module not found" + colors.end)
raise ModuleNotFound("module not found")
except IndexError:
print(colors.red + "please enter module name" + colors.end)
raise ModuleNotFound("module not found")
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
raise UnknownCommand("module in use")
def show(self, args):
try:
if args[0] == "modules":
t = PrettyTable([colors.bold+'Modules:', ''+colors.end])
t.align = 'l'
t.valign = 'm'
t.border = False
xml = moddbparser.parsemoddb()
root = xml[0]
for category in root:
if category.tag == "category":
t.add_row(["", ""])
t.add_row([colors.red+colors.uline+category.attrib["name"]+colors.end, colors.red+colors.uline+"Description"+colors.end])
for item in category:
if item.tag == "module":
for child in item:
if child.tag == "shortdesc":
t.add_row([item.attrib["name"], child.text])
break
print(t)
elif args[0] == "options" and self.mm.module_loaded == 1:
try:
moduleop.printoptions(self.modadd)
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
raise UnknownCommand("module not loaded or unknown command")
except IndexError:
raise UnknownCommand("unknown command")
def back(self, args):
if self.mm.module_loaded == 1:
self.mm.module_loaded = 0
self.mm.module_name = ""
else:
raise UnknownCommand("unknown command")
def reload(self, args):
try:
if self.mm.module_loaded == 0:
try:
mod = "modules."+args[0]
if mod in sys.modules:
value_holder.save_values(sys.modules[mod].variables)
importlib.reload(sys.modules[mod])
value_holder.set_values(sys.modules[mod].variables)
try:
self.modadd.init()
except AttributeError:
pass
print (colors.bold+"module "+ args[0] +" reloaded"+colors.end)
else:
importlib.import_module(mod)
try:
self.modadd.init()
except AttributeError:
pass
print(colors.bold+"module "+ args[0] +" imported"+colors.end)
except IndexError:
print (colors.red+"please enter module's name"+colors.end)
else:
try:
mod = "modules."+args[0]
if mod in sys.modules:
value_holder.save_values(sys.modules[mod].variables)
importlib.reload(sys.modules[mod])
value_holder.set_values(sys.modules[mod].variables)
try:
self.modadd.init()
except AttributeError:
pass
print (colors.bold+"module "+ args[0] +" reloaded"+colors.end)
else:
importlib.import_module(mod)
try:
self.modadd.init()
except AttributeError:
pass
print(colors.bold+"module "+ self.mm.module_name +" reloaded"+colors.end)
except IndexError:
mod = "modules."+self.mm.module_name
if mod in sys.modules:
value_holder.save_values(sys.modules[mod].variables)
importlib.reload(sys.modules[mod])
value_holder.set_values(sys.modules[mod].variables)
try:
self.modadd.init()
except AttributeError:
pass
print (colors.bold+"module "+ self.mm.module_name +" reloaded"+colors.end)
else:
modadd = importlib.import_module(mod)
try:
self.modadd.init()
except AttributeError:
pass
print(colors.bold+"module "+ self.mm.module_name +" reloaded"+colors.end)
except:
print(colors.red+"faced unexpected error during reimporting:\n")
traceback.print_exc()
print(colors.end)
if api.enabled == True:
raise
def run(self, args):
if self.mm.module_loaded == 1:
try:
return self.modadd.run()
except KeyboardInterrupt:
print(colors.red+"module terminated"+colors.end)
except PermissionError:
print_error("permission denied")
return "[err] permission denied"
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
raise UnknownCommand("module not loaded")
def set(self, args):
try:
self.modadd.variables[args[0]][0] = args[1]
print(colors.bold+args[0] +" => "+ str(args[1]) + colors.end)
except (NameError, KeyError):
print(colors.red + "option not found" + colors.end)
raise VariableError("option not found")
except IndexError:
print(colors.red + "please enter variable's value" + colors.end)
raise VariableError("no value")
except:
print(colors.red+"unexpected error in module:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
def new(self, args):
try:
if args[0] == "module":
try:
completeName = os.path.join(getpath.modules(), args[1]+".py")
if os.path.exists(completeName):
print(colors.red+"module already exists"+colors.end)
else:
mfile = open(completeName, 'w')
template = os.path.join('core', 'module_template')
f = open(template, 'r')
template_contents = f.readlines()
template_contents[5] = " \"name\": \""+args[1]+"\", # Module's name (should be same as file name)\n"
template_contents[11] = " \"initdate\": \""+(time.strftime("%d.%m.%Y"))+"\", # Initial date\n"
template_contents[12] = " \"lastmod\": \""+(time.strftime("%d.%m.%Y"))+"\", # Last modification\n"
mfile.writelines(template_contents)
mfile.close()
print(colors.bold+"module "+ args[1] +".py" +" created to modules folder"+colors.end)
print(colors.bold+"done"+colors.end)
except IndexError:
print(colors.red + "please enter module's name" + colors.end)
except PermissionError:
print(colors.red + "error: permission denied" + colors.end)
except IOError:
print(colors.red + "something went wrong" + colors.end)
else:
raise UnknownCommand("unknown command")
except IndexError:
raise UnknownCommand("unkown command")
def check(self, args):
try:
if args[0] == "module":
try:
self.modadd = importlib.import_module("modules."+args[1])
print(colors.green+"module found"+colors.end)
check_module(self.modadd)
print(colors.green+"\ntest passed"+colors.end)
except IndexError:
print(colors.red + "please enter module name"+ colors.end)
except ImportError:
print(colors.red+"error: module not found"+colors.end)
except:
print(colors.red + "error:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
else:
raise UnknownCommand("unknown command")
except IndexError:
raise UnknownCommand("unkown command")
def matrix(self, args):
try:
core.matrix.main()
except KeyboardInterrupt:
curses.endwin()
curses.curs_set(1)
curses.reset_shell_mode()
curses.echo()
def cowsay(self, args):
try:
message = ' '.join(args)
print(core.cowsay.cowsay(message))
return
except ValueError:
print(core.cowsay.cowsay("Hakku Framework"))
def ds(self, args):
print(dsag.darkside)
def make(self, args):
try:
if args[0] == "exit":
sys.exit(0)
else:
raise UnknownCommand("unkown command")
except IndexError:
raise UnknownCommand("unkown command")
def touchingsky(self, args):
core.touchingsky.main()
def loaded(self, args):
print(sys.modules.keys())
def list(self, args):
if len(args) != 0 and args[0] == "dependencies":
if self.mm.module_loaded == 0:
modules = glob.glob(getpath.modules()+"*.py")
dependencies = []
for module in modules:
try:
modadd = importlib.import_module("modules."+os.path.basename(module).replace(".py", ""))
for dep in modadd.conf["dependencies"]:
if dep not in dependencies:
dependencies.append(dep)
except ImportError:
print(colors.red+"import error: "+os.path.basename(module).replace(".py", "")+colors.end)
break
except KeyError:
pass
for dep in dependencies:
print(dep)
else:
try:
for dep in self.modadd.conf["dependencies"]:
print(dep)
except KeyError:
print_info("this module doesn't require any dependencies")
else:
raise UnknownCommand("unknown command")
def init(self, args):
if self.mm.module_loaded == 1:
try:
self.modadd.init()
print("module initialized")
except AttributeError:
print("this module doesn't have init function")
else:
raise UnknownCommand("unknown command")
def redb(self, args):
if self.mm.module_loaded == 1:
try:
moduleop.addtodb(self.modadd)
except PermissionError:
print(colors.red+"error: permission denied"+colors.end)
except KeyboardInterrupt:
print()
except:
print(colors.red+"faced unexpected:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
else:
answer = input("do you want to update whole database? ")
if answer == "yes" or answer == "y":
try:
modules = glob.glob(getpath.modules()+"*.py")
for module in modules:
module = module.replace(getpath.modules(), '').replace('.py', '')
if module != '__init__' and module != "test":
modadd = importlib.import_module("modules."+module)
moduleop.addtodb(modadd)
except PermissionError:
print(colors.red+"error: permission denied"+colors.end)
except KeyboardInterrupt:
print()
except:
print(colors.red+"faced unexpected:\n")
traceback.print_exc(file=sys.stdout)
print(colors.end)
if api.enabled == True:
raise
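# --- Hedged dispatch sketch (editor addition) ---
# Illustrates how a console loop might route a parsed command line to the
# built-in commands above; _FakeManager is a stand-in for the real module
# manager and exists only for this sketch.
class _FakeManager(object):
    module_loaded = 0
    module_name = ""
    def set_name(self, name):
        self.module_name = name
def _dispatch(line):
    cm = Cmethods(_FakeManager())
    parts = line.split()
    try:
        handler = getattr(cm, parts[0])
    except AttributeError:
        # unknown built-in: fall back to the loaded module's custom commands
        return cm.mcu(parts)
    return handler(parts[1:])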
|
|
""" Tests dataset views methods """
from __future__ import unicode_literals
from __future__ import absolute_import
import copy
import datetime
import json
import django
from django.utils.timezone import now
from rest_framework import status
from rest_framework.test import APITestCase
from data.data.json.data_v6 import DataV6
from data.dataset.json.dataset_v6 import DataSetDefinitionV6
from util import rest
from data.models import DataSet
import data.test.utils as dataset_test_utils
import storage.test.utils as storage_utils
from storage.models import Workspace
"""Tests the v6/datasets/ endpoint"""
class TestDatasetViews(APITestCase):
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
# create a workspace and files
self.workspace = storage_utils.create_workspace(name='Test Workspace', is_active=True)
self.file1 = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.file2 = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.file3 = storage_utils.create_file(file_name='input_f2.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.file4 = storage_utils.create_file(file_name='input_eb.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.file5 = storage_utils.create_file(file_name='input_fb.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.file6 = storage_utils.create_file(file_name='input_fb2.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
today = now()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
self.dataset = dataset_test_utils.create_dataset(definition=copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
title="Test Dataset 1", description="Key Test Dataset Number one")
DataSet.objects.filter(pk=self.dataset.pk).update(created=yesterday)
self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2", description="Test Dataset Number two")
DataSet.objects.filter(pk=self.dataset2.pk).update(created=tomorrow)
# create dataset members
data1 = copy.deepcopy(dataset_test_utils.DATA_DEFINITION)
data1['files']['input_e'] = [self.file1.id]
data1['files']['input_f'] = [self.file2.id, self.file3.id]
self.member1_1 = dataset_test_utils.create_dataset_members(dataset=self.dataset, data_list=[data1])[0]
data2 = copy.deepcopy(dataset_test_utils.DATA_DEFINITION)
data2['files']['input_e'] = [self.file4.id]
data2['files']['input_f'] = [self.file5.id, self.file6.id]
self.member1_1_2 = dataset_test_utils.create_dataset_members(dataset=self.dataset, data_list=[data2])
self.member2_1 = dataset_test_utils.create_dataset_members(dataset=self.dataset2)[0]
self.member2_2 = dataset_test_utils.create_dataset_members(dataset=self.dataset2)[0]
def test_successful(self):
"""Tests successfully calling the v6/datasets/ view.
"""
url = '/%s/datasets/' % self.api
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Test response contains specific dataset
result = json.loads(response.content)
self.assertEqual(len(result['results']), 2)
for entry in result['results']:
expected = None
expectedFiles = 0
if entry['id'] == self.dataset.id:
expected = self.dataset
expectedFiles = 6
elif entry['id'] == self.dataset2.id:
expected = self.dataset2
expectedFiles = 0
else:
self.fail('Found unexpected result: %s' % entry['id'])
self.assertEqual(entry['title'], expected.title)
self.assertEqual(entry['files'], expectedFiles)
def test_dataset_time_successful(self):
"""Tests successfully calling the v6/datasets api with time filters
"""
yesterday = now().date() - datetime.timedelta(days=1)
yesterday = yesterday.isoformat() + 'T00:00:00Z'
today = now().date()
today = today.isoformat() + 'T00:00:00Z'
tomorrow = now().date() + datetime.timedelta(days=1)
tomorrow = tomorrow.isoformat() + 'T00:00:00Z'
url = '/%s/datasets/?started=%s' % (self.api, today)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify one result
result = json.loads(response.content)
self.assertEqual(len(result['results']), 1)
url = '/%s/datasets/?ended=%s' % (self.api, today)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify one result
result = json.loads(response.content)
self.assertEqual(len(result['results']), 1)
def test_dataset_id_successful(self):
"""Tests successfully calling the v6/datasets/?id= api call
"""
url = '/%s/datasets/?id=%s' % (self.api, self.dataset.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify one result
result = json.loads(response.content)
self.assertEqual(len(result['results']), 1)
url = '/%s/datasets/?id=%s&id=%s' % (self.api, self.dataset.id, self.dataset2.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify two results
result = json.loads(response.content)
self.assertEqual(len(result['results']), 2)
def test_dataset_keyword_successful(self):
"""Tests successfully calling the v6/datasets/?keyword= api call
"""
url = '/%s/datasets/?keyword=%s' % (self.api, 'key')
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify one result
result = json.loads(response.content)
self.assertEqual(len(result['results']), 1)
url = '/%s/datasets/?keyword=%s&keyword=%s' % (self.api, 'one', 'two')
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify 2 results
result = json.loads(response.content)
self.assertEqual(len(result['results']), 2)
def test_order_by(self):
"""Tests successfully calling the datasets view with sorting."""
url = '/%s/datasets/?order=-id' % self.api
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Verify 2 results
result = json.loads(response.content)
self.assertEqual(len(result['results']), 2)
self.assertEqual(result['results'][0]['id'], self.dataset2.id)
"""Tests the v6/datasets POST calls """
class TestDataSetPostView(APITestCase):
"""Tests the v6/dataset/ POST API call"""
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
def test_invalid_definition(self):
"""Tests successfully calling POST with an invalid definition."""
json_data = {}
url = '/%s/datasets/' % self.api
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
definition = copy.deepcopy(dataset_test_utils.DATASET_DEFINITION)
del definition['global_data']['json']['input_c']
json_data = {
'title': 'My Dataset',
'description': 'A test dataset',
'definition': definition,
}
url = '/%s/datasets/' % self.api
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
def test_add_dataset(self):
"""Tests adding a new dataset"""
url = '/%s/datasets/' % self.api
json_data = {
'title': 'My Dataset',
'description': 'A test dataset',
'definition': copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
new_dataset_id = result['id']
self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
self.assertEqual(result['title'], json_data['title'])
self.assertEqual(result['description'], json_data['description'])
# create another dataset
json_data_2 = {
'title': 'My Dataset 2',
'description': 'Another test dataset',
'definition': copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
}
response = self.client.generic('POST', url, json.dumps(json_data_2), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
new_dataset_id = result['id']
self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
self.assertEqual(result['title'], json_data_2['title'])
self.assertEqual(result['description'], json_data_2['description'])
def test_create_dataset_with_members(self):
"""Tests creating a dataset along with a bunch of members"""
title = 'Test Dataset'
description = 'Test DataSet description'
file1 = storage_utils.create_file()
file2 = storage_utils.create_file()
file3 = storage_utils.create_file()
file4 = storage_utils.create_file()
# call test
parameters = {'version': '6',
'files': [
{'name': 'input_a',
'media_types': ['application/json'],
'required': True},
{'name': 'input_b',
'media_types': ['application/json'],
'multiple': True,
'required': True},
{'name': 'input_c',
'media_types': ['application/json'],
'required': True}
],
'json': []}
definition = {'version': '6', 'parameters': parameters}
json_data = {
'title': title,
'description': description,
'definition': definition,
'data': {
'version': '7',
'files': {
'input_a': [file1.id],
'input_b': [file2.id, file3.id],
'input_c': [file4.id],
},
'json': {}
},
}
url = '/%s/datasets/' % self.api
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
new_dataset_id = result['id']
self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
self.assertTrue(len(result['definition']['parameters']['files']), 3)
self.assertTrue(len(result['files']), 4)
"""Tests the v6/datasets/<dataset_id> endpoint"""
class TestDatasetDetailsView(APITestCase):
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
# Create workspace
self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
last_modified=now())
# Create files
self.country = storage_utils.create_country()
self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path', countries=[self.country],
workspace=self.workspace)
self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
for i in range(0,500):
storage_utils.create_file(source_collection='12345')
for i in range(0,500):
storage_utils.create_file(source_collection='123456')
# Create datasets
parameters = {'version': '6',
'files': [
{'name': 'input_a',
'media_types': ['application/json'],
'required': True}
],
'json': []}
definition = {'version': '6', 'parameters': parameters}
self.dataset = dataset_test_utils.create_dataset( title="Test Dataset 1",
description="Test Dataset Number 1",
definition=definition)
parameters2 = {'version': '6',
'files': [
{'name': 'input_b',
'media_types': ['application/json'],
'required': True,
'multiple': True},
{'name': 'input_c',
'media_types': ['application/json'],
'required': False}
],
'json': []}
definition2 = {'version': '6', 'parameters': parameters2}
self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
description="Test Dataset Number 2",
definition=definition2)
# Create members
data_dict = {
'version': '6',
'files': {'input_a': [self.src_file_a.id]},
'json': {}
}
data = DataV6(data=data_dict).get_dict()
self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
data_list=[data])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
'json': {}
}
data2 = DataV6(data=data_dict).get_dict()
self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data2])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
'json': {}
}
data3 = DataV6(data=data_dict).get_dict()
self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data3])[0]
def test_dataset_details_successful(self):
"""Tests successfully calling the v6/datasets/<dataset_id>/ view.
"""
url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset details
result = json.loads(response.content)
self.assertEqual(result['id'], self.dataset.id)
self.assertEqual(result['title'], self.dataset.title)
self.assertEqual(result['description'], self.dataset.description)
dsdict = DataSetDefinitionV6(definition=self.dataset.definition).get_dict()
del dsdict['version']
self.assertDictEqual(result['definition'], dsdict)
self.assertEqual(len(result['files']), 1)
self.assertIsNotNone(result['files'][0]['scale_file']["countries"])
url = '/%s/datasets/%d/' % (self.api, self.dataset2.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset details
result = json.loads(response.content)
self.assertEqual(result['id'], self.dataset2.id)
self.assertEqual(result['title'], self.dataset2.title)
self.assertEqual(result['description'], self.dataset2.description)
self.maxDiff = None
dsdict = DataSetDefinitionV6(definition=self.dataset2.definition).get_dict()
del dsdict['version']
        self.assertDictEqual(result['definition'], dsdict)
self.assertEqual(len(result['files']), 3)
def test_add_dataset_member(self):
"""Tests adding a new dataset member"""
url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
data_dict = {
'version': '6',
'files': {'input_a': [self.src_file_a.id]},
'json': {}
}
json_data = {
'data': [data_dict],
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
self.assertEqual(len(result), 1)
def test_add_filter_dataset_members(self):
"""Tests adding new dataset members based on a filter"""
url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
template = {
'version': '6',
'files': {'input_a': 'FILE_VALUE'},
'json': {}
}
json_data = {
'data_template': template,
'source_collection': '12345'
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
self.assertEqual(len(result), 500)
json_data = {
'data_template': template,
'source_collection': ['12345', '123456']
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
result = json.loads(response.content)
self.assertEqual(len(result), 1000)
def test_add_filter_dataset_members_dry_run(self):
"""Tests adding new dataset members based on a filter"""
url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
template = {
'version': '6',
'files': {'input_a': 'FILE_VALUE'},
'json': {}
}
json_data = {
'data_template': template,
'source_collection': '12345',
'dry_run': True
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
result = json.loads(response.content)
self.assertEqual(len(result), 500)
def test_add_invalid_dataset_member(self):
"""Tests adding an invalid new dataset member"""
url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_a.id]},
'json': {}
}
json_data = {
'data': [data_dict],
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
class TestDataSetValidationView(APITestCase):
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
def test_validate_successful(self):
"""Tests successfully validating a new dataset using the v6/datasets/validation API
"""
url = '/%s/datasets/validation/' % self.api
json_data = {
'title': 'Test Dataset',
'description': 'My Test Dataset',
'definition': dataset_test_utils.DATASET_DEFINITION,
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
results = json.loads(response.content)
self.assertTrue(results['is_valid'])
self.assertEqual(len(results['warnings']), 0)
self.assertEqual(len(results['errors']), 0)
def test_validate_missing_definition(self):
url = '/%s/datasets/validation/' % self.api
json_data = {
'title': 'Test Dataset',
'description': 'My Test Dataset',
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
results = json.loads(response.content)
self.assertEqual(results['detail'], "Missing required parameter: \"definition\"")
def test_invalid_definition(self):
"""Validates an invalid dataset definition
"""
url = '/%s/datasets/validation/' % self.api
json_data = {
'title': 'Test Dataset',
'description': 'My Test Dataset',
'definition': {
'version': '6',
'parameters': [
{
'name': 'global-param',
},
{
'name': 'member-param',
},
],
},
}
response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
results = json.loads(response.content)
self.assertFalse(results['is_valid'])
self.assertEqual(len(results['errors']), 1)
self.assertEqual(results['errors'][0]['name'], 'INVALID_DATASET_DEFINITION')
"""Tests the v6/datasets/%d/members/ endpoint"""
class TestDatasetMembersView(APITestCase):
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
# Create workspace
self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
last_modified=now())
# Create files
self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
# Create datasets
parameters = {'version': '6',
'files': [
{'name': 'input_a',
'media_types': ['application/json'],
'required': True}
],
'json': []}
definition = {'version': '6', 'parameters': parameters}
self.dataset = dataset_test_utils.create_dataset( title="Test Dataset 1",
description="Test Dataset Number 1",
definition=definition)
parameters2 = {'version': '6',
'files': [
{'name': 'input_b',
'media_types': ['application/json'],
'required': True,
'multiple': True},
{'name': 'input_c',
'media_types': ['application/json'],
'required': False}
],
'json': []}
definition2 = {'version': '6', 'parameters': parameters2}
self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
description="Test Dataset Number 2",
definition=definition2)
# Create members
data_dict = {
'version': '6',
'files': {'input_a': [self.src_file_a.id]},
'json': {}
}
data = DataV6(data=data_dict).get_dict()
self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
data_list=[data])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
'json': {}
}
data2 = DataV6(data=data_dict).get_dict()
self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data2])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
'json': {}
}
data3 = DataV6(data=data_dict).get_dict()
self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data3])[0]
def test_dataset_members_successful(self):
"""Tests successfully calling the v6/datasets/members/<id>/ view.
"""
url = '/%s/datasets/%d/members/' % (self.api, self.dataset.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset members
result = json.loads(response.content)
self.assertEqual(len(result['results']), 1)
url = '/%s/datasets/%d/members/' % (self.api, self.dataset2.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset members
result = json.loads(response.content)
self.assertEqual(len(result['results']), 2)
"""Tests the v6/datasets/members/<datasetmember_id> endpoint"""
class TestDatasetMemberDetailsView(APITestCase):
api = 'v6'
def setUp(self):
django.setup()
rest.login_client(self.client, is_staff=True)
# Create workspace
self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
last_modified=now())
# Create files
self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
file_size=10, data_type_tags=['type'], file_path='the_path',
workspace=self.workspace)
# Create datasets
parameters = {'version': '6',
'files': [
{'name': 'input_a',
'media_types': ['application/json'],
'required': True}
],
'json': []}
definition = {'version': '6', 'parameters': parameters}
self.dataset = dataset_test_utils.create_dataset( title="Test Dataset 1",
description="Test Dataset Number 1",
definition=definition)
parameters2 = {'version': '6',
'files': [
{'name': 'input_b',
'media_types': ['application/json'],
'required': True,
'multiple': True},
{'name': 'input_c',
'media_types': ['application/json'],
'required': False}
],
'json': []}
definition2 = {'version': '6', 'parameters': parameters2}
self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
description="Test Dataset Number 2",
definition=definition2)
# Create members
data_dict = {
'version': '6',
'files': {'input_a': [self.src_file_a.id]},
'json': {}
}
data = DataV6(data=data_dict).get_dict()
self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
data_list=[data])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
'json': {}
}
data2 = DataV6(data=data_dict).get_dict()
self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data2])[0]
data_dict = {
'version': '6',
'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
'json': {}
}
data3 = DataV6(data=data_dict).get_dict()
self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
data_list=[data3])[0]
def test_dataset_member_details_successful(self):
"""Tests successfully calling the v6/datasets/members/<id>/ view.
"""
url = '/%s/datasets/members/%d/' % (self.api, self.member_a.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset details
result = json.loads(response.content)
self.assertEqual(result['id'], self.member_a.id)
self.assertEqual(result['dataset']['id'], self.dataset.id)
versionless = copy.deepcopy(self.member_a.data)
del versionless['version']
self.assertDictEqual(result['data'], versionless)
url = '/%s/datasets/members/%d/' % (self.api, self.member_b.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset details
result = json.loads(response.content)
self.assertEqual(result['id'], self.member_b.id)
self.assertEqual(result['dataset']['id'], self.dataset2.id)
versionless = copy.deepcopy(self.member_b.data)
del versionless['version']
self.assertDictEqual(result['data'], versionless)
url = '/%s/datasets/members/%d/' % (self.api, self.member_bc.id)
response = self.client.generic('GET', url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
# Check response for dataset details
result = json.loads(response.content)
self.assertEqual(result['id'], self.member_bc.id)
self.assertEqual(result['dataset']['id'], self.dataset2.id)
versionless = copy.deepcopy(self.member_bc.data)
del versionless['version']
self.assertDictEqual(result['data'], versionless)
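# Quick reference (informal, derived from the tests above): POST bodies accepted by the
# v6/datasets/<dataset_id>/ endpoint come in two shapes.
#
#   Explicit members:       {"data": [{"version": "6", "files": {...}, "json": {}}, ...]}
#   Template + collection:  {"data_template": {...}, "source_collection": "12345"}
#
# In the template form the placeholder string 'FILE_VALUE' is filled in per matching source
# file, "source_collection" may be a single value or a list, and adding "dry_run": true
# returns the would-be members with a 200 instead of creating them (201).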
|
|
# -*- coding: utf-8 -*-
# lib to create fake survival datasets
import numpy as np
import pandas as pd
from scipy import stats
from scipy.optimize import newton
from scipy.integrate import cumtrapz
random = np.random
def piecewise_exponential_survival_data(n, breakpoints, lambdas):
"""
Note
--------
No censoring is present here.
Examples
--------
>>> T = piecewise_exponential_survival_data(100000, [1, 3], [0.2, 3, 1.])
>>> NelsonAalenFitter().fit(T).plot()
"""
assert len(breakpoints) == len(lambdas) - 1
breakpoints = np.append([0], breakpoints)
delta_breakpoints = np.diff(breakpoints)
T = np.empty(n)
for i in range(n):
U = random.random()
E = -np.log(U)
running_sum = 0
for delta, lambda_, bp in zip(delta_breakpoints, lambdas, breakpoints):
factor = lambda_ * delta
if E < running_sum + factor:
t = bp + (E - running_sum) / lambda_
break
running_sum += factor
else:
t = breakpoints[-1] + (E - running_sum) / lambdas[-1]
T[i] = t
return T
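# Informal note on the sampler above: E = -log(U) is a draw from a unit exponential, and the
# event time is the point t at which the cumulative hazard H(t) first reaches E. Because the
# hazard is piecewise constant, H grows by lambda_i * delta_i over each segment, so the loop
# walks the segments until the running sum would pass E and inverts linearly within that
# segment; if E exceeds the hazard accumulated over all earlier segments, the time is
# extrapolated past the last breakpoint at the final rate.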
def exponential_survival_data(n, cr=0.05, scale=1.0):
t = stats.expon.rvs(scale=scale, size=n)
if cr == 0.0:
return t, np.ones(n, dtype=bool)
def pF(h):
v = 1.0 * h / scale
return v / (np.exp(v) - 1) - cr
# find the threshold:
h = newton(pF, 1.0, maxiter=500)
# generate truncated data
# pylint: disable=invalid-unary-operand-type
R = (1 - np.exp(-h / scale)) * stats.uniform.rvs(size=n)
entrance = -np.log(1 - R) * scale
C = (t + entrance) < h # should occur 1-cr of the time.
T = np.minimum(h - entrance, t)
return T, C
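# Informal note on exponential_survival_data above: newton() solves pF(h) == 0, choosing the
# observation horizon h so that, with entry times drawn from the exponential truncated to
# [0, h), the expected fraction of lifetimes censored at the horizon equals cr. A subject is
# observed (C == True) only when entrance + lifetime falls before h, and the recorded time T
# is capped at the remaining follow-up h - entrance.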
# Models with covariates
class coeff_func:
"""This is a decorator class used later to construct nice names"""
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
def __repr__():
s = self.f.__doc__.replace("alpha", "%.4f" % kwargs["alpha"]).replace("beta", "%.4f" % kwargs["beta"])
return s
self.__doc__ = __repr__()
self.__repr__ = __repr__
self.__str__ = __repr__
return self.f(*args, **kwargs)
@coeff_func
def exp_comp_(t, alpha=1, beta=1):
"""beta*(1 - np.exp(-alpha*(t-beta)))"""
return beta * (1 - np.exp(-alpha * np.maximum(0, t - 10 * beta)))
@coeff_func
def log_(t, alpha=1, beta=1):
"""beta*np.log(alpha*(t-beta)+1)"""
return beta * np.log(alpha * np.maximum(0, t - 10 * beta) + 1)
@coeff_func
def inverseSq_(t, alpha=1, beta=1):
"""beta/(t+alpha+1)**(0.5)"""
return beta / (t + alpha + 1) ** (0.5)
@coeff_func
def periodic_(t, alpha=1, beta=1):
"""abs(0.5*beta*sin(0.1*alpha*t + alpha*beta))"""
return 0.5 * beta * np.sin(0.1 * alpha * t)
@coeff_func
def constant_(t, alpha=1, beta=1): # pylint: disable=unused-argument
"""beta"""
return beta
FUNCS = [exp_comp_, log_, inverseSq_, constant_, periodic_]
def right_censor_lifetimes(lifetimes, max_, min_=0):
"""
Right censor the deaths, uniformly
lifetimes: (n,) array of positive random variables
max_: the max time a censorship can occur
min_: the min time a censorship can occur
Returns
The actual observations including uniform right censoring, and
D_i (observed death or did not)
I think this is deprecated
"""
n = lifetimes.shape[0]
u = min_ + (max_ - min_) * random.rand(n)
observations = np.minimum(u, lifetimes)
return observations, lifetimes == observations
def generate_covariates(n, d, n_binary=0, p=0.5):
"""
n: the number of instances, integer
    d: the dimension of the covariates, integer
    n_binary: an integer between 0 and d giving how many of the covariates are binary
    p: the probability of a 1 for each binary covariate
returns (n, d+1)
"""
# pylint: disable=chained-comparison
assert n_binary >= 0 and n_binary <= d, "binary must be between 0 and d"
covariates = np.zeros((n, d + 1))
covariates[:, : d - n_binary] = random.exponential(1, size=(n, d - n_binary))
covariates[:, d - n_binary : -1] = random.binomial(1, p, size=(n, n_binary))
covariates[:, -1] = np.ones(n)
return covariates
def constant_coefficients(d, timelines, constant=True, independent=0):
"""
Proportional hazards model.
d: the dimension of the dataset
timelines: the observational times
constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is independent of survival), or
    a list of covariates to make independent.
returns a matrix (t,d+1) of coefficients
"""
return time_varying_coefficients(d, timelines, constant, independent=independent, randgen=random.normal)
def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen=random.exponential):
"""
    Time-varying coefficients
d: the dimension of the dataset
timelines: the observational times
constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is independent of survival), or
    a list of covariates to make independent.
randgen: how scalar coefficients (betas) are sampled.
returns a matrix (t,d+1) of coefficients
"""
t = timelines.shape[0]
try:
a = np.arange(d)
random.shuffle(a)
independent = a[:independent]
except IndexError:
pass
n_funcs = len(FUNCS)
coefficients = np.zeros((t, d))
data_generators = []
for i in range(d):
f = FUNCS[random.randint(0, n_funcs)] if not constant else constant_
if i in independent:
beta = 0
else:
beta = randgen((1 - constant) * 0.5 / d)
coefficients[:, i] = f(timelines, alpha=randgen(2000.0 / t), beta=beta)
data_generators.append(f.__doc__)
df_coefficients = pd.DataFrame(coefficients, columns=data_generators, index=timelines)
return df_coefficients
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
    timelines: the observational times
    constant: make the coefficients constant (not time dependent)
    independent: the number of coefficients to set to 0 (covariate is independent of survival)
    n_binary: the number of binary covariates
    model: from ["aalen", "cox"]
    Returns:
    hazard rates: (t,n) dataframe,
    coefficients: (t,d+1) dataframe of coefficients,
    covariates: (n,d) dataframe
"""
covariates = generate_covariates(n, d, n_binary=n_binary)
if model == "aalen":
coefficients = time_varying_coefficients(d + 1, timelines, independent=independent, constant=constant)
hazard_rates = np.dot(covariates, coefficients.T)
return (pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates))
if model == "cox":
covariates = covariates[:, :-1]
        coefficients = constant_coefficients(d, timelines, independent=independent)
baseline = time_varying_coefficients(1, timelines)
hazard_rates = np.exp(np.dot(covariates, coefficients.T)) * baseline[baseline.columns[0]].values
coefficients["baseline: " + baseline.columns[0]] = baseline.values
return (pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates))
raise Exception
def generate_random_lifetimes(hazard_rates, timelines, size=1, censor=None):
"""
Based on the hazard rates, compute random variables from the survival function
    hazard_rates: (t,n) dataframe of hazard rates
    timelines: (t,) the observation times
    size: the number to return, per hazard rate
    censor: If True, adds uniform censoring between timelines.max() and 0
    If a positive number, censors all events above that value.
If (n,) np.array >=0 , censor elementwise.
Returns
-------
survival_times: (size,n) array of random variables.
(optional) censorship: if censor is true, returns (size,n) array with bool True
if the death was observed (not right-censored)
"""
n = hazard_rates.shape[1]
survival_times = np.empty((n, size))
cumulative_hazards = cumulative_integral(hazard_rates.values, timelines).T
for i in range(size):
u = random.rand(n, 1)
e = -np.log(u)
v = (e - cumulative_hazards) < 0
cross = v.argmax(1)
survival_times[:, i] = timelines[cross]
survival_times[cross == 0, i] = np.inf
if censor is not None:
if isinstance(censor, bool):
T = timelines.max()
rv = T * random.uniform(size=survival_times.shape)
else:
rv = censor
observed = np.less_equal(survival_times, rv)
survival_times = np.minimum(rv, survival_times)
return survival_times.T, observed.T
else:
return survival_times
def generate_observational_matrix(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
hz, coeff, covariates = generate_hazard_rates(n, d, timelines, constant, independent, n_binary, model=model)
R = generate_random_lifetimes(hz, timelines)
covariates["event_at"] = R.T[0]
return (
covariates.sort_values(by="event_at"),
pd.DataFrame(cumulative_integral(coeff.values, timelines), columns=coeff.columns, index=timelines),
)
def cumulative_integral(fx, x):
"""
Return the cumulative integral of arrays, initial value is 0.
Parameters
----------
    fx: (n,d) numpy array, the values you want the integral of
x: (n,) numpy array, location to integrate over.
"""
return cumtrapz(fx.T, x, initial=0).T
def construct_survival_curves(hazard_rates, timelines):
"""
Given hazard rates, reconstruct the survival curves
Parameters
----------
    hazard_rates: (t,n) dataframe of hazard rates
    timelines: (t,) the observational times
    Returns
    -------
    t: survival curves, (t,n) dataframe
"""
cumulative_hazards = cumulative_integral(hazard_rates.values, timelines)
return pd.DataFrame(np.exp(-cumulative_hazards), index=timelines)
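# Minimal driver sketch (illustrative only, not part of the library): simulate a small
# Aalen-style observational dataset on a short timeline and print the first few rows.
# It relies solely on the functions defined above and the imports at the top of the module.
if __name__ == "__main__":
    timeline = np.linspace(0, 5, 100)
    observations, cumulative_coefficients = generate_observational_matrix(25, 3, timeline)
    print(observations.head())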
|
|
"""
Installs and configures puppet
"""
import sys
import logging
import os
import platform
import time
from packstack.installer import utils
from packstack.installer import basedefs, output_messages
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.ospluginutils import (gethostlist,
manifestfiles,
scan_puppet_logfile,
validate_puppet_logfile)
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OSPUPPET"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
PUPPETDIR = os.path.abspath(os.path.join(basedefs.DIR_PROJECT_DIR, 'puppet'))
MODULEDIR = os.path.join(PUPPETDIR, "modules")
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Puppet configuration")
paramsList = [
]
groupDict = {"GROUP_NAME" : "PUPPET",
"DESCRIPTION" : "Puppet Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def getinstallhostlist(conf):
    hosts = []
    exclude_list = map(str.strip, conf['EXCLUDE_SERVERS'].split(','))
    for host in gethostlist(conf):
        if host not in exclude_list:
            hosts.append(host)
    return hosts
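# For example (illustrative values): with EXCLUDE_SERVERS set to "10.0.0.2, 10.0.0.3",
# getinstallhostlist() returns every host from gethostlist(conf) except those two; the
# comma-separated entries are whitespace-stripped before the comparison.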
def initSequences(controller):
puppetpresteps = [
{'title': 'Clean Up', 'functions':[runCleanup]},
]
controller.insertSequence("Clean Up", [], [], puppetpresteps, index=0)
puppetsteps = [
{'title': 'Installing Dependencies', 'functions':[installdeps]},
{'title': 'Copying Puppet modules and manifests', 'functions':[copyPuppetModules]},
{'title': 'Applying Puppet manifests', 'functions':[applyPuppetManifest]},
]
controller.addSequence("Puppet", [], [], puppetsteps)
def runCleanup(config):
localserver = utils.ScriptRunner()
localserver.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
localserver.execute()
def installdeps(config):
for hostname in getinstallhostlist(controller.CONF):
server = utils.ScriptRunner(hostname)
for package in ("puppet", "openssh-clients", "tar", "nc"):
server.append("rpm -q %s || yum install -y %s" % (package, package))
server.execute()
def copyPuppetModules(config):
os_modules = ' '.join(('apache', 'cinder', 'concat',
'create_resources', 'firewall',
'glance', 'horizon', 'inifile',
'keystone', 'memcached', 'mysql',
'nova', 'openstack', 'packstack',
'qpid', 'neutron', 'rsync', 'ssh', 'stdlib',
'swift', 'sysctl', 'tempest', 'vcsrepo',
'vlan', 'vswitch', 'xinetd'))
# write puppet manifest to disk
manifestfiles.writeManifests()
server = utils.ScriptRunner()
tar_opts = ""
if platform.linux_distribution()[0] == "Fedora":
tar_opts += "--exclude create_resources "
for hostname in getinstallhostlist(controller.CONF):
host_dir = controller.temp_map[hostname]
puppet_dir = os.path.join(host_dir, basedefs.PUPPET_MANIFEST_RELATIVE)
server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
# copy Packstack facts
server.append("tar %s --dereference -cpzf - facts | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" % (tar_opts, hostname, host_dir))
# copy Packstack manifests
server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
server.append("tar %s --dereference -cpzf - ../manifests | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" % (tar_opts, hostname, host_dir))
# copy resources
for path, localname in controller.resources.get(hostname, []):
server.append("scp -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null %s root@%s:%s/resources/%s" %
(path, hostname, host_dir, localname))
# copy Puppet modules required by Packstack
server.append("cd %s/puppet/modules" % basedefs.DIR_PROJECT_DIR)
server.append("tar %s --dereference -cpzf - %s | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" %
(tar_opts, os_modules, hostname,
os.path.join(host_dir, 'modules')))
server.execute()
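# Informal note on the handshake used below: applyPuppetManifest() launches each remote
# "puppet apply" in the background, redirecting its output to <manifest>.running and renaming
# that file to <manifest>.finished once the run exits. waitforpuppet() repeatedly tries to scp
# the .finished file back; when the copy finally succeeds the run is treated as complete and
# the retrieved log is scanned for notices and validated for errors.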
def waitforpuppet(currently_running):
global controller
log_len = 0
twirl = ["-","\\","|","/"]
while currently_running:
for hostname, finished_logfile in currently_running:
log_file = os.path.splitext(os.path.basename(finished_logfile))[0]
space_len = basedefs.SPACE_LEN - len(log_file)
if len(log_file) > log_len:
log_len = len(log_file)
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
twirl = twirl[-1:] + twirl[:-1]
sys.stdout.write(("\rTesting if puppet apply is finished : %s" % log_file).ljust(40 + log_len))
sys.stdout.write("[ %s ]" % twirl[0])
sys.stdout.flush()
try:
# Once a remote puppet run has finished, we retrieve the log
# file and check it for errors
local_server = utils.ScriptRunner()
log = os.path.join(basedefs.PUPPET_MANIFEST_DIR,
os.path.basename(finished_logfile).replace(".finished", ".log"))
local_server.append('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@%s:%s %s' % (hostname, finished_logfile, log))
# Errors are expected here if the puppet run isn't finished so we suppress logging them
local_server.execute(logerrors=False)
# If we got to this point the puppet apply has finished
currently_running.remove((hostname, finished_logfile))
# clean off the last "testing apply" msg
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
sys.stdout.write(("\r").ljust(45 + log_len))
except ScriptRuntimeError, e:
# the test raises an exception if the file doesn't exist yet
# TO-DO: We need to start testing 'e' for unexpected exceptions
time.sleep(3)
continue
# check log file for relevant notices
controller.MESSAGES.extend(scan_puppet_logfile(log))
# check the log file for errors
validate_puppet_logfile(log)
sys.stdout.write(("\r%s : " % log_file).ljust(basedefs.SPACE_LEN))
print ("[ " + utils.color_text(output_messages.INFO_DONE, 'green') + " ]")
def applyPuppetManifest(config):
print
currently_running = []
lastmarker = None
for manifest, marker in manifestfiles.getFiles():
# if the marker has changed then we don't want to proceed until
# all of the previous puppet runs have finished
if lastmarker != None and lastmarker != marker:
waitforpuppet(currently_running)
lastmarker = marker
for hostname in getinstallhostlist(controller.CONF):
if "%s_" % hostname not in manifest:
continue
host_dir = controller.temp_map[hostname]
print "Applying " + manifest
server = utils.ScriptRunner(hostname)
man_path = os.path.join(controller.temp_map[hostname],
basedefs.PUPPET_MANIFEST_RELATIVE,
manifest)
running_logfile = "%s.running" % man_path
finished_logfile = "%s.finished" % man_path
currently_running.append((hostname, finished_logfile))
# The apache puppet module doesn't work if we set FACTERLIB
# https://github.com/puppetlabs/puppetlabs-apache/pull/138
if not (manifest.endswith('_horizon.pp') or manifest.endswith('_nagios.pp')):
server.append("export FACTERLIB=$FACTERLIB:%s/facts" % host_dir)
server.append("touch %s" % running_logfile)
server.append("chmod 600 %s" % running_logfile)
server.append("export PACKSTACK_VAR_DIR=%s" % host_dir)
loglevel = ''
if logging.root.level <= logging.DEBUG:
loglevel = '--debug'
command = "( flock %s/ps.lock puppet apply %s --modulepath %s/modules %s > %s 2>&1 < /dev/null ; mv %s %s ) > /dev/null 2>&1 < /dev/null &" % (host_dir, loglevel, host_dir, man_path, running_logfile, running_logfile, finished_logfile)
server.append(command)
server.execute()
    # wait for outstanding puppet runs before exiting
waitforpuppet(currently_running)
|
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from touchdown.core.resource import Resource
from touchdown.core.plan import Plan, Present
from touchdown.core import argument, serializers
from touchdown.core.action import Action
from touchdown.core import errors
from ..account import BaseAccount
from ..iam import ServerCertificate
from ..s3 import Bucket
from ..common import SimpleDescribe, SimpleApply, SimpleDestroy
from ..vpc import Subnet, SecurityGroup
from .. import route53
class Listener(Resource):
resource_name = "listener"
protocol = argument.String(field="Protocol")
port = argument.Integer(field="LoadBalancerPort")
instance_protocol = argument.String(field="InstanceProtocol")
instance_port = argument.Integer(field="InstancePort")
ssl_certificate = argument.Resource(
ServerCertificate,
field="SSLCertificiateId",
serializer=serializers.Property("Arn"),
)
class HealthCheck(Resource):
resource_name = "health_check"
dot_ignore = True
interval = argument.Integer(field="Interval")
check = argument.String(field="Target")
healthy_threshold = argument.Integer(field="HealthyThreshold")
unhealthy_threshold = argument.Integer(field="UnhealthyThreshold")
timeout = argument.Integer(field="Timeout")
class Attributes(Resource):
resource_name = "attributes"
dot_ignore = True
idle_timeout = argument.Integer(
default=30,
field="ConnectionSettings",
serializer=serializers.Dict(
IdleTimeout=serializers.Identity(),
),
)
connection_draining = argument.Integer(
default=0,
field="ConnectionDraining",
serializer=serializers.Dict(
Enabled=serializers.Expression(lambda runner, object: object > 0),
Timeout=serializers.Identity(),
)
)
cross_zone_load_balancing = argument.Boolean(
default=True,
field="CrossZoneLoadBalancing",
serializer=serializers.Dict(
Enabled=serializers.Identity(),
)
)
access_log = argument.Resource(
Bucket,
field="AccessLog",
serializer=serializers.Dict(
Enabled=serializers.Expression(lambda runner, object: object is not None),
S3BucketName=serializers.Identifier(),
)
)
# FIXME Support EmitInterval and S3BucketPrefix
class LoadBalancer(Resource):
resource_name = "load_balancer"
name = argument.String(field="LoadBalancerName")
listeners = argument.ResourceList(
Listener,
field="Listeners",
serializer=serializers.List(serializers.Resource()),
)
availability_zones = argument.List(field="AvailabilityZones")
scheme = argument.String(choices=["internet-facing", "private"], field="Scheme")
subnets = argument.ResourceList(Subnet, field="Subnets")
security_groups = argument.ResourceList(SecurityGroup, field="SecurityGroups")
# tags = argument.Dict()
health_check = argument.Resource(HealthCheck)
attributes = argument.Resource(Attributes)
account = argument.Resource(BaseAccount)
class Describe(SimpleDescribe, Plan):
resource = LoadBalancer
service_name = 'elb'
describe_action = "describe_load_balancers"
describe_envelope = "LoadBalancerDescriptions"
describe_notfound_exception = "LoadBalancerNotFound"
key = 'LoadBalancerName'
def get_describe_filters(self):
return {"LoadBalancerNames": [self.resource.name]}
class Apply(SimpleApply, Describe):
create_action = "create_load_balancer"
create_response = "not-that-useful"
signature = [
Present('name'),
Present('listeners'),
]
def update_attributes(self):
if not self.resource.attributes:
return
a = self.resource.attributes
changed = False
if not self.object:
changed = True
else:
attributes = self.client.describe_load_balancer_attributes(
LoadBalancerName=self.resource_id
)['LoadBalancerAttributes']
if attributes['ConnectionSettings']['IdleTimeout'] != a.idle_timeout:
changed = True
if attributes['ConnectionDraining']['Timeout'] != a.connection_draining:
changed = True
if attributes['CrossZoneLoadBalancing']['Enabled'] != a.cross_zone_load_balancing:
changed = True
if attributes['AccessLog'].get('S3BucketName', None) != a.access_log:
changed = True
if changed:
yield self.generic_action(
"Configure attributes",
self.client.modify_load_balancer_attributes,
LoadBalancerName=serializers.Identifier(),
LoadBalancerAttributes=serializers.Context(
serializers.Const(a),
serializers.Resource()
),
)
def update_health_check(self):
if not self.object and self.resource.health_check:
yield self.generic_action(
"Configure health check",
self.client.configure_health_check,
LoadBalancerName=self.resource.name,
HealthCheck=serializers.Context(
serializers.Const(self.resource.health_check),
serializers.Resource(),
),
)
def update_object(self):
for action in super(Apply, self).update_object():
yield action
for action in self.update_attributes():
yield action
for action in self.update_health_check():
yield action
class WaitForNetworkInterfaces(Action):
description = ["Wait for network interfaces to be released"]
def run(self):
# We have to query ec2 and not elb api, so create a new client
client = self.plan.session.create_client("ec2")
description = "ELB {}".format(self.plan.resource.name)
for i in range(120):
interfaces = client.describe_network_interfaces(
Filters=[
{"Name": "description", "Values": [description]},
]
).get('NetworkInterfaces', [])
if len(interfaces) == 0:
return
time.sleep(1)
raise errors.Error(
"Load balancer {} still hanging around in Elastic Network Interfaces after deletion for over 2 minutes.".format(
self.plan.resource_id,
)
)
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_load_balancer"
def destroy_object(self):
for change in super(Destroy, self).destroy_object():
yield change
yield WaitForNetworkInterfaces(self)
class AliasTarget(route53.AliasTarget):
""" Adapts a LoadBalancer into a AliasTarget """
input = LoadBalancer
def get_serializer(self, runner, **kwargs):
return serializers.Context(
serializers.Const(self.adapts),
serializers.Dict(
DNSName=serializers.Context(
serializers.Property("CanonicalHostedZoneName"),
serializers.Expression(lambda r, o: route53._normalize(o)),
),
HostedZoneId=serializers.Property("CanonicalHostedZoneNameID"),
EvaluateTargetHealth=False,
)
)
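# Informal note on the declarations above: each argument names the AWS API parameter it fills
# via `field=...`, and an optional `serializer` describes how the Python value is rendered on
# the wire. For example, Attributes.connection_draining (an integer timeout) becomes
# {"Enabled": timeout > 0, "Timeout": timeout} through serializers.Dict with an Expression and
# an Identity, while Listener.ssl_certificate resolves to the referenced certificate's Arn via
# serializers.Property("Arn").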
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9451")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9451")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Creepercoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Creepercoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, wheel_cache=None):
"""
Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: Global options.
:param session: Instance of pip.download.PipSession.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines = content.splitlines()
lines = ignore_comments(lines)
lines = join_lines(lines)
lines = skip_regex(lines, options)
for line_number, line in enumerate(lines, 1):
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache)
for req in req_iter:
yield req
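# Illustrative usage sketch (not part of this module): parse_requirements() must be given an
# explicit session, e.g.
#
#     from pip.download import PipSession
#     for req in parse_requirements('requirements.txt', session=PipSession()):
#         print(req)
#
# Each yielded object is an InstallRequirement built by process_line() below.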
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
opts, args = parser.parse_args(shlex.split(line), defaults)
# yield a line requirement
if args:
args_line = ' '.join(args)
comes_from = '-r %s (line %s)' % (filename, line_number)
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_line, comes_from, isolated=isolated, options=req_options,
wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
comes_from = '-r %s (line %s)' % (filename, line_number)
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=comes_from,
default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements:
req_path = opts.requirements[0]
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
            req_dir = os.path.dirname(filename)
            req_path = os.path.join(req_dir, req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
wheel_cache=wheel_cache
)
for req in parser:
yield req
# set finder options
elif finder:
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.allow_all_external:
finder.allow_all_external = opts.allow_all_external
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.allow_external:
finder.allow_external |= set(
[normalize_name(v).lower() for v in opts.allow_external])
if opts.allow_unverified:
# Remove after 7.0
finder.allow_unverified |= set(
[normalize_name(v).lower() for v in opts.allow_unverified])
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(iterator):
"""
    Joins a line ending in '\' with the line that follows it.
"""
lines = []
for line in iterator:
if not line.endswith('\\'):
if lines:
lines.append(line)
yield ''.join(lines)
lines = []
else:
yield line
else:
lines.append(line.strip('\\'))
# TODO: handle space after '\'.
# TODO: handle '\' on last line.
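# Informal example of join_lines(): the physical lines
#     requests \
#     --install-option="--prefix=/opt"
# are yielded as the single logical line
#     requests --install-option="--prefix=/opt"
# because the trailing backslash is stripped and the continuation is appended to it.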
def ignore_comments(iterator):
"""
Strips and filters empty or commented lines.
"""
for line in iterator:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line
def skip_regex(lines, options):
"""
Optionally exclude lines that match '--skip-requirements-regex'
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
lines = filterfalse(re.compile(skip_regex).search, lines)
return lines
|
|
"""Mixin classes with arithmetic operators."""
# This file was generated using xarray.util.generate_ops. Do not edit manually.
import operator
from . import nputils, ops
class DatasetOpsMixin:
__slots__ = ()
def _binary_op(self, other, f, reflexive=False):
raise NotImplementedError
def __add__(self, other):
return self._binary_op(other, operator.add)
def __sub__(self, other):
return self._binary_op(other, operator.sub)
def __mul__(self, other):
return self._binary_op(other, operator.mul)
def __pow__(self, other):
return self._binary_op(other, operator.pow)
def __truediv__(self, other):
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other):
return self._binary_op(other, operator.floordiv)
def __mod__(self, other):
return self._binary_op(other, operator.mod)
def __and__(self, other):
return self._binary_op(other, operator.and_)
def __xor__(self, other):
return self._binary_op(other, operator.xor)
def __or__(self, other):
return self._binary_op(other, operator.or_)
def __lt__(self, other):
return self._binary_op(other, operator.lt)
def __le__(self, other):
return self._binary_op(other, operator.le)
def __gt__(self, other):
return self._binary_op(other, operator.gt)
def __ge__(self, other):
return self._binary_op(other, operator.ge)
def __eq__(self, other):
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other):
return self._binary_op(other, nputils.array_ne)
def __radd__(self, other):
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other):
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other):
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other):
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other):
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other):
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other):
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other):
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other):
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other):
return self._binary_op(other, operator.or_, reflexive=True)
def _inplace_binary_op(self, other, f):
raise NotImplementedError
def __iadd__(self, other):
return self._inplace_binary_op(other, operator.iadd)
def __isub__(self, other):
return self._inplace_binary_op(other, operator.isub)
def __imul__(self, other):
return self._inplace_binary_op(other, operator.imul)
def __ipow__(self, other):
return self._inplace_binary_op(other, operator.ipow)
def __itruediv__(self, other):
return self._inplace_binary_op(other, operator.itruediv)
def __ifloordiv__(self, other):
return self._inplace_binary_op(other, operator.ifloordiv)
def __imod__(self, other):
return self._inplace_binary_op(other, operator.imod)
def __iand__(self, other):
return self._inplace_binary_op(other, operator.iand)
def __ixor__(self, other):
return self._inplace_binary_op(other, operator.ixor)
def __ior__(self, other):
return self._inplace_binary_op(other, operator.ior)
def _unary_op(self, f, *args, **kwargs):
raise NotImplementedError
def __neg__(self):
return self._unary_op(operator.neg)
def __pos__(self):
return self._unary_op(operator.pos)
def __abs__(self):
return self._unary_op(operator.abs)
def __invert__(self):
return self._unary_op(operator.invert)
def round(self, *args, **kwargs):
return self._unary_op(ops.round_, *args, **kwargs)
def argsort(self, *args, **kwargs):
return self._unary_op(ops.argsort, *args, **kwargs)
def conj(self, *args, **kwargs):
return self._unary_op(ops.conj, *args, **kwargs)
def conjugate(self, *args, **kwargs):
return self._unary_op(ops.conjugate, *args, **kwargs)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
__iadd__.__doc__ = operator.iadd.__doc__
__isub__.__doc__ = operator.isub.__doc__
__imul__.__doc__ = operator.imul.__doc__
__ipow__.__doc__ = operator.ipow.__doc__
__itruediv__.__doc__ = operator.itruediv.__doc__
__ifloordiv__.__doc__ = operator.ifloordiv.__doc__
__imod__.__doc__ = operator.imod.__doc__
__iand__.__doc__ = operator.iand.__doc__
__ixor__.__doc__ = operator.ixor.__doc__
__ior__.__doc__ = operator.ior.__doc__
__neg__.__doc__ = operator.neg.__doc__
__pos__.__doc__ = operator.pos.__doc__
__abs__.__doc__ = operator.abs.__doc__
__invert__.__doc__ = operator.invert.__doc__
round.__doc__ = ops.round_.__doc__
argsort.__doc__ = ops.argsort.__doc__
conj.__doc__ = ops.conj.__doc__
conjugate.__doc__ = ops.conjugate.__doc__
class DataArrayOpsMixin:
__slots__ = ()
def _binary_op(self, other, f, reflexive=False):
raise NotImplementedError
def __add__(self, other):
return self._binary_op(other, operator.add)
def __sub__(self, other):
return self._binary_op(other, operator.sub)
def __mul__(self, other):
return self._binary_op(other, operator.mul)
def __pow__(self, other):
return self._binary_op(other, operator.pow)
def __truediv__(self, other):
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other):
return self._binary_op(other, operator.floordiv)
def __mod__(self, other):
return self._binary_op(other, operator.mod)
def __and__(self, other):
return self._binary_op(other, operator.and_)
def __xor__(self, other):
return self._binary_op(other, operator.xor)
def __or__(self, other):
return self._binary_op(other, operator.or_)
def __lt__(self, other):
return self._binary_op(other, operator.lt)
def __le__(self, other):
return self._binary_op(other, operator.le)
def __gt__(self, other):
return self._binary_op(other, operator.gt)
def __ge__(self, other):
return self._binary_op(other, operator.ge)
def __eq__(self, other):
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other):
return self._binary_op(other, nputils.array_ne)
def __radd__(self, other):
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other):
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other):
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other):
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other):
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other):
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other):
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other):
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other):
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other):
return self._binary_op(other, operator.or_, reflexive=True)
def _inplace_binary_op(self, other, f):
raise NotImplementedError
def __iadd__(self, other):
return self._inplace_binary_op(other, operator.iadd)
def __isub__(self, other):
return self._inplace_binary_op(other, operator.isub)
def __imul__(self, other):
return self._inplace_binary_op(other, operator.imul)
def __ipow__(self, other):
return self._inplace_binary_op(other, operator.ipow)
def __itruediv__(self, other):
return self._inplace_binary_op(other, operator.itruediv)
def __ifloordiv__(self, other):
return self._inplace_binary_op(other, operator.ifloordiv)
def __imod__(self, other):
return self._inplace_binary_op(other, operator.imod)
def __iand__(self, other):
return self._inplace_binary_op(other, operator.iand)
def __ixor__(self, other):
return self._inplace_binary_op(other, operator.ixor)
def __ior__(self, other):
return self._inplace_binary_op(other, operator.ior)
def _unary_op(self, f, *args, **kwargs):
raise NotImplementedError
def __neg__(self):
return self._unary_op(operator.neg)
def __pos__(self):
return self._unary_op(operator.pos)
def __abs__(self):
return self._unary_op(operator.abs)
def __invert__(self):
return self._unary_op(operator.invert)
def round(self, *args, **kwargs):
return self._unary_op(ops.round_, *args, **kwargs)
def argsort(self, *args, **kwargs):
return self._unary_op(ops.argsort, *args, **kwargs)
def conj(self, *args, **kwargs):
return self._unary_op(ops.conj, *args, **kwargs)
def conjugate(self, *args, **kwargs):
return self._unary_op(ops.conjugate, *args, **kwargs)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
__iadd__.__doc__ = operator.iadd.__doc__
__isub__.__doc__ = operator.isub.__doc__
__imul__.__doc__ = operator.imul.__doc__
__ipow__.__doc__ = operator.ipow.__doc__
__itruediv__.__doc__ = operator.itruediv.__doc__
__ifloordiv__.__doc__ = operator.ifloordiv.__doc__
__imod__.__doc__ = operator.imod.__doc__
__iand__.__doc__ = operator.iand.__doc__
__ixor__.__doc__ = operator.ixor.__doc__
__ior__.__doc__ = operator.ior.__doc__
__neg__.__doc__ = operator.neg.__doc__
__pos__.__doc__ = operator.pos.__doc__
__abs__.__doc__ = operator.abs.__doc__
__invert__.__doc__ = operator.invert.__doc__
round.__doc__ = ops.round_.__doc__
argsort.__doc__ = ops.argsort.__doc__
conj.__doc__ = ops.conj.__doc__
conjugate.__doc__ = ops.conjugate.__doc__
class VariableOpsMixin:
__slots__ = ()
def _binary_op(self, other, f, reflexive=False):
raise NotImplementedError
def __add__(self, other):
return self._binary_op(other, operator.add)
def __sub__(self, other):
return self._binary_op(other, operator.sub)
def __mul__(self, other):
return self._binary_op(other, operator.mul)
def __pow__(self, other):
return self._binary_op(other, operator.pow)
def __truediv__(self, other):
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other):
return self._binary_op(other, operator.floordiv)
def __mod__(self, other):
return self._binary_op(other, operator.mod)
def __and__(self, other):
return self._binary_op(other, operator.and_)
def __xor__(self, other):
return self._binary_op(other, operator.xor)
def __or__(self, other):
return self._binary_op(other, operator.or_)
def __lt__(self, other):
return self._binary_op(other, operator.lt)
def __le__(self, other):
return self._binary_op(other, operator.le)
def __gt__(self, other):
return self._binary_op(other, operator.gt)
def __ge__(self, other):
return self._binary_op(other, operator.ge)
def __eq__(self, other):
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other):
return self._binary_op(other, nputils.array_ne)
def __radd__(self, other):
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other):
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other):
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other):
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other):
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other):
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other):
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other):
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other):
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other):
return self._binary_op(other, operator.or_, reflexive=True)
def _inplace_binary_op(self, other, f):
raise NotImplementedError
def __iadd__(self, other):
return self._inplace_binary_op(other, operator.iadd)
def __isub__(self, other):
return self._inplace_binary_op(other, operator.isub)
def __imul__(self, other):
return self._inplace_binary_op(other, operator.imul)
def __ipow__(self, other):
return self._inplace_binary_op(other, operator.ipow)
def __itruediv__(self, other):
return self._inplace_binary_op(other, operator.itruediv)
def __ifloordiv__(self, other):
return self._inplace_binary_op(other, operator.ifloordiv)
def __imod__(self, other):
return self._inplace_binary_op(other, operator.imod)
def __iand__(self, other):
return self._inplace_binary_op(other, operator.iand)
def __ixor__(self, other):
return self._inplace_binary_op(other, operator.ixor)
def __ior__(self, other):
return self._inplace_binary_op(other, operator.ior)
def _unary_op(self, f, *args, **kwargs):
raise NotImplementedError
def __neg__(self):
return self._unary_op(operator.neg)
def __pos__(self):
return self._unary_op(operator.pos)
def __abs__(self):
return self._unary_op(operator.abs)
def __invert__(self):
return self._unary_op(operator.invert)
def round(self, *args, **kwargs):
return self._unary_op(ops.round_, *args, **kwargs)
def argsort(self, *args, **kwargs):
return self._unary_op(ops.argsort, *args, **kwargs)
def conj(self, *args, **kwargs):
return self._unary_op(ops.conj, *args, **kwargs)
def conjugate(self, *args, **kwargs):
return self._unary_op(ops.conjugate, *args, **kwargs)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
__iadd__.__doc__ = operator.iadd.__doc__
__isub__.__doc__ = operator.isub.__doc__
__imul__.__doc__ = operator.imul.__doc__
__ipow__.__doc__ = operator.ipow.__doc__
__itruediv__.__doc__ = operator.itruediv.__doc__
__ifloordiv__.__doc__ = operator.ifloordiv.__doc__
__imod__.__doc__ = operator.imod.__doc__
__iand__.__doc__ = operator.iand.__doc__
__ixor__.__doc__ = operator.ixor.__doc__
__ior__.__doc__ = operator.ior.__doc__
__neg__.__doc__ = operator.neg.__doc__
__pos__.__doc__ = operator.pos.__doc__
__abs__.__doc__ = operator.abs.__doc__
__invert__.__doc__ = operator.invert.__doc__
round.__doc__ = ops.round_.__doc__
argsort.__doc__ = ops.argsort.__doc__
conj.__doc__ = ops.conj.__doc__
conjugate.__doc__ = ops.conjugate.__doc__
class DatasetGroupByOpsMixin:
__slots__ = ()
def _binary_op(self, other, f, reflexive=False):
raise NotImplementedError
def __add__(self, other):
return self._binary_op(other, operator.add)
def __sub__(self, other):
return self._binary_op(other, operator.sub)
def __mul__(self, other):
return self._binary_op(other, operator.mul)
def __pow__(self, other):
return self._binary_op(other, operator.pow)
def __truediv__(self, other):
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other):
return self._binary_op(other, operator.floordiv)
def __mod__(self, other):
return self._binary_op(other, operator.mod)
def __and__(self, other):
return self._binary_op(other, operator.and_)
def __xor__(self, other):
return self._binary_op(other, operator.xor)
def __or__(self, other):
return self._binary_op(other, operator.or_)
def __lt__(self, other):
return self._binary_op(other, operator.lt)
def __le__(self, other):
return self._binary_op(other, operator.le)
def __gt__(self, other):
return self._binary_op(other, operator.gt)
def __ge__(self, other):
return self._binary_op(other, operator.ge)
def __eq__(self, other):
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other):
return self._binary_op(other, nputils.array_ne)
def __radd__(self, other):
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other):
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other):
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other):
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other):
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other):
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other):
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other):
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other):
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other):
return self._binary_op(other, operator.or_, reflexive=True)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
class DataArrayGroupByOpsMixin:
__slots__ = ()
def _binary_op(self, other, f, reflexive=False):
raise NotImplementedError
def __add__(self, other):
return self._binary_op(other, operator.add)
def __sub__(self, other):
return self._binary_op(other, operator.sub)
def __mul__(self, other):
return self._binary_op(other, operator.mul)
def __pow__(self, other):
return self._binary_op(other, operator.pow)
def __truediv__(self, other):
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other):
return self._binary_op(other, operator.floordiv)
def __mod__(self, other):
return self._binary_op(other, operator.mod)
def __and__(self, other):
return self._binary_op(other, operator.and_)
def __xor__(self, other):
return self._binary_op(other, operator.xor)
def __or__(self, other):
return self._binary_op(other, operator.or_)
def __lt__(self, other):
return self._binary_op(other, operator.lt)
def __le__(self, other):
return self._binary_op(other, operator.le)
def __gt__(self, other):
return self._binary_op(other, operator.gt)
def __ge__(self, other):
return self._binary_op(other, operator.ge)
def __eq__(self, other):
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other):
return self._binary_op(other, nputils.array_ne)
def __radd__(self, other):
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other):
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other):
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other):
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other):
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other):
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other):
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other):
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other):
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other):
return self._binary_op(other, operator.or_, reflexive=True)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
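# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated mixins above): every dunder
# in these mixin classes just delegates to the _binary_op / _inplace_binary_op
# / _unary_op hooks, so a consumer only needs to implement those hooks.
# "ExampleWrapper" and its "data" attribute are hypothetical names used purely
# for demonstration.
class ExampleWrapper(DataArrayOpsMixin):
    def __init__(self, data):
        self.data = data

    def _binary_op(self, other, f, reflexive=False):
        # Unwrap other ExampleWrapper operands; plain numbers pass through.
        other_data = getattr(other, "data", other)
        if reflexive:
            return ExampleWrapper(f(other_data, self.data))
        return ExampleWrapper(f(self.data, other_data))

    def _unary_op(self, f, *args, **kwargs):
        return ExampleWrapper(f(self.data, *args, **kwargs))

# (ExampleWrapper(2) + 3).data == 5, (10 - ExampleWrapper(4)).data == 6 and
# (-ExampleWrapper(2)).data == -2 all flow through the mixin-provided dunders.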
import curses
import sys
import time
import os.path
import random
import pickle
from curses import wrapper
gamedims = [5,22]
currPos = [10,10]
currPosList = [1,3]
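# main(stdscr): curses entry point. Sets up the colour pairs, derives the
# maximum board size from the terminal (stored in sizeselector and written to
# ./constants/string.txt), then loops over the top-level menu; on Exit it
# pickles gamedims to ./constants/size.pkl before quitting.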
def main(stdscr):#{
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_WHITE)
curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
curses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_WHITE)
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_WHITE)
curses.init_pair(7, curses.COLOR_CYAN, curses.COLOR_WHITE)
curses.init_pair(8, curses.COLOR_GREEN, curses.COLOR_WHITE)
curses.init_pair(9, curses.COLOR_BLUE, curses.COLOR_CYAN)
stdscr.clear()
stdscr.keypad(1)
curses.curs_set(0)
dims = stdscr.getmaxyx()
global sizeselector
maxheight = str(dims[0]-2)
maxwidth = str(dims[1]-2)
sizeselector.extend(['5',maxwidth])
sizeselector.extend(['5',maxheight])
with open('./constants/string.txt','w') as f:
for x in sizeselector:
f.write('%s\n' % x)
#if os.path.exists('./constants/size.pkl'):#{
# with open('./constants/size.pkl') as f:#{
# global gamedims
# gamedims = pickle.load(f)
# #}
##}
#else:#{
# global gamedims
# with open('./constants/size.pkl', 'w') as output:#{
# data = gamedims
# pickle.dump(data, output)
# #}
##}
while True:
selection = makeMenu(stdscr, gamemenu)
if selection == 1:#{
sampleselection = 0
while True:#{
if sampleselection == curses.KEY_BACKSPACE: break
initanswer=makeMenu(stdscr, initgame)
if initanswer == 1:#{
global gamedims
x = gamedims[0]
y = gamedims[1]
boardArray=[[0 for j in range(0,x)]for i in range(0,y)]
showProgress(stdscr)
drawBoard(stdscr, boardArray)
#}
elif initanswer == 2:#{
while True:#{
sampleselection=makeMenu(stdscr, samples)
if sampleselection == 1:#{
print ('1')
#}
elif sampleselection == 2:#{
print ('2')
#}
elif sampleselection == 3:#{
print ('3')
#}
elif sampleselection == 4:#{
print ('4')
#}
elif sampleselection == 5:#{
print ('5')
#}
elif sampleselection == 0:
break
#}
#}
elif initanswer == 0:#{
break
#}
#}
#}
elif selection == 2:#{
while True:#{
option = makeMenu(stdscr, optionsmenu)
if option == 1:#{
fieldsize=numberSelector(stdscr, sizeselector, gamedims)
if fieldsize != [-1,-1]:#{
gamedims = fieldsize
break
#}
#}
elif option == 2:#{
characters = elemSelector(stdscr, charselector, elemArray, currPosList)
#}
elif option == 3:#{
rulesmenu=makeMenu(stdscr, rules)
if rulesmenu == 1:#{
print ('1')
#}
elif rulesmenu == 2:#{
print ('2')
#}
#}
elif option == 0:#{
break
#}
#}
#}
elif selection == 3:#{
stdscr.clear()
stdscr.refresh()
curses.endwin()
output = open('./constants/size.pkl', 'w')
data = gamedims
pickle.dump(data, output)
output.close()
sys.exit()
#}
elif selection == 0:#{
stdscr.clear()
stdscr.refresh()
curses.endwin()
output = open('./constants/size.pkl', 'w')
data = gamedims
pickle.dump(data, output)
output.close()
sys.exit()
#}
#elif selection == ord('h'):#{
# stdscr.addstr(dims[0]-2,dims[1]/2-len(gamemenu[1])/2, gamemenu[1])
# stdscr.refresh()
##}
#}
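# makeMenu(stdscr, L): draws a framed menu. L[0] is the title, L[1] the help
# line shown on 'h', L[2:] the options. Arrow keys move the highlight; Enter
# returns the 1-based index of the chosen option, backspace returns 0.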
def makeMenu(stdscr, L):#{
pos1 = 1
header = L[0]
optCount = len(L)-2
dims = stdscr.getmaxyx()
lenList = [0]
for i in L[2:]:#{
lenList.append(len(i))
#}
maxWidth = max(lenList)+1
while True:#{
for x in range (0,optCount):#{
if pos1 == x+1:#{
stdscr.addstr(dims[0]/2-optCount+2*x, dims[1]/2-maxWidth/2-2,'> '+L[x+2].center(maxWidth,' ')+' <', curses.color_pair(2))
#}
else:#{
stdscr.addstr(dims[0]/2-optCount+2*x, dims[1]/2-maxWidth/2-2, L[x+2].center(maxWidth+4, ' '))
#}
#}
for y in range (0,maxWidth+10):#{
stdscr.addstr(dims[0]/2-optCount-2, dims[1]/2-maxWidth/2+y-3-2, '=',curses.color_pair(2))
#}
stdscr.addnstr(dims[0]/2+optCount, dims[1]/2-maxWidth/2-3-2, ornament1 ,maxWidth+10,curses.color_pair(2))
for a in range (0,optCount*2+1):#{
stdscr.addstr(dims[0]/2-optCount-1+a, dims[1]/2-maxWidth/2+maxWidth+6-2,'I',curses.color_pair(2))
stdscr.addstr(dims[0]/2-optCount-1+a, dims[1]/2-maxWidth/2-3-2, 'I', curses.color_pair(2))
#}
stdscr.addstr(dims[0]/2+optCount+1, dims[1]/2-len('Press "h" for help')/2, 'Press "h" for help',curses.color_pair(2))
for b in range(0,len(header)):#{
stdscr.addstr(dims[0]/2-optCount-2, dims[1]/2-len(L[0])/2+b, header[b],curses.color_pair(random.randint(3,8)))
#}
stdscr.refresh()
selection1=stdscr.getch()
if selection1 == curses.KEY_UP and pos1 > 1:#{
pos1-=1
#}
elif selection1 == curses.KEY_DOWN and pos1 < optCount:#{
pos1+=1
#}
elif selection1 == 10:#{
stdscr.clear()
stdscr.refresh()
return pos1
break
#}
elif selection1 == curses.KEY_BACKSPACE:#{
stdscr.clear()
stdscr.refresh()
pos1 = 0
return pos1
break
#}
elif selection1 == ord('h'):#{
stdscr.addstr(dims[0]-2,dims[1]/2-len(L[1])/2, L[1])
#}
#}
#}
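# drawBoard(stdscr, boardArray): renders boardArray as a bordered grid centred
# on the screen, waits for a keypress, then clears the screen again.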
def drawBoard(stdscr, boardArray):#{
x = len(boardArray[0])
y = len(boardArray)
dims = stdscr.getmaxyx()
z1 = int(round(float(x)/2))-x/2
z2 = int(round(float(y)/2))-y/2
corner1 = [dims[0]/2-y/2-1,dims[1]/2-x-1]
corner2 = [dims[0]/2-y/2-1,dims[1]/2+x+1]
corner3 = [dims[0]/2+y/2+z2,dims[1]/2+x+1]
corner4 = [dims[0]/2+y/2+z2,dims[1]/2-x-1]
stdscr.addstr(corner1[0], corner1[1], "+", curses.A_REVERSE)
stdscr.addstr(corner2[0], corner2[1], "+", curses.A_REVERSE)
stdscr.addstr(corner3[0], corner3[1], "+", curses.A_REVERSE)
stdscr.addstr(corner4[0], corner4[1], "+", curses.A_REVERSE)
for k in range(1,x*2+2):#{
stdscr.addstr(corner1[0],corner1[1]+k, "-", curses.A_REVERSE)
stdscr.addstr(corner4[0]+z2,corner4[1]+k, "-", curses.A_REVERSE)
#}
for l in range(1,y):#{
stdscr.addstr(corner1[0]+l,corner1[1], "|", curses.A_REVERSE)
stdscr.addstr(corner2[0]+l,corner2[1], "|", curses.A_REVERSE)
#}
for i in range(0,y):#{
for j in range(0,x):#{
stdscr.addstr(corner1[0]+i, corner1[1]+2*j, ' '+str(boardArray[i][j]))
#}
#}
stdscr.refresh()
input = stdscr.getch()
stdscr.clear()
stdscr.refresh()
#}
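# makePrompt(stdscr, L): shows question L[0] with numbered answers L[2:] and
# waits for a digit key. Returns the raw keycode of the digit pressed
# (ord('1')..ord(str(ansCount))), or -1 on backspace; 'h' shows the help text.
# Not called from main() in this file.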
def makePrompt(stdscr, L):#{
question = L[0]
header = L[1]
ansCount = len(L)-2
queLength = len(L[0])
dims = stdscr.getmaxyx()
while True:#{
stdscr.clear()
stdscr.box()
stdscr.addstr(dims[0]/2-5, dims[1]/2-queLength/2, L[0])
if ansCount == 2:#{
stdscr.addstr(dims[0]/2+2, dims[1]/2-len(L[2])-5, L[2]+'(1)')
stdscr.addstr(dims[0]/2+2, dims[1]/2+5, L[3]+'(2)')
#}
elif ansCount == 3:#{
stdscr.addstr(dims[0]/2+2, dims[1]/2-len(L[2])-4, L[2]+' (1)')
stdscr.addstr(dims[0]/2+2, dims[1]/2+4, L[3]+'(2)')
stdscr.addstr(dims[0]/2+4, dims[1]/2-len(L[4])/2, L[4]+'(3)')
#}
else:#{
for x in range(1,ansCount+1):
stdscr.addstr(dims[0]/2+2*x, dims[1]/2-len(L[x+1])/2, L[x+1]+'('+str(x)+')')
#}
stdscr.refresh()
answer = stdscr.getch()
if answer > ord('0') and answer <= ord(str(ansCount)):#{
stdscr.clear()
stdscr.refresh()
return answer
break
#}
elif answer == curses.KEY_BACKSPACE:#{
stdscr.clear()
stdscr.refresh()
answer = -1
return answer
break
#}
elif answer == ord('h'):#{
stdscr.addstr(dims[0]-2, dims[1]-len(L[1])-1, L[1])
stdscr.addstr(dims[0]-2, dims[1]/2, L[2])
#}
#}
#}
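# showProgress(stdscr): pops up a small bordered window with a short animated
# "Progress ..." bar, shows "Done!", then clears the window.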
def showProgress(stdscr):#{
dims = stdscr.getmaxyx()
coords = [dims[0]/2-1,dims[1]/2-16]
win = stdscr.subwin(3,32,coords[0],coords[1])
win.border(0)
win.addstr(1,1,'Progress ')
win.refresh()
time.sleep(0.5)
pos = 10
for i in range(15):#{
win.addstr(1,pos,'.')
win.refresh()
time.sleep(0.02)
pos+=1
#}
win.addstr(1,26,'Done!')
win.refresh()
time.sleep(0.5)
win.clear()
win.refresh()
#}
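# elemSelector(stdscr, L, elemArray, currPosList): draws one horizontal
# character carousel per row of elemArray (used for picking the cell glyphs).
# The 3- and 4-row layouts are only partially implemented (several branches
# are still print stubs), arrow-key navigation is missing, and the function
# exits on backspace without returning a selection.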
def elemSelector(stdscr, L, elemArray, currPosList):#{
selections = len(elemArray)
if len(currPosList) == 1:#{
pos1 = currPosList[0]
#}
elif len(currPosList) == 2:#{
pos1 = currPosList[0]
pos2 = currPosList[1]
#}
elif len(currPosList) == 3:#{
pos1 = currPosList[0]
pos2 = currPosList[1]
pos3 = currPosList[2]
#}
elif len(currPosList) == 4:#{
pos1 = currPosList[0]
pos2 = currPosList[1]
pos3 = currPosList[2]
pos4 = currPosList[3]
#}
subject = L[0]
dims = stdscr.getmaxyx()
while True:#{
if selections == 1:#{
for x in range(0,3):#{
stdscr.addnstr(dims[0]/2-5+x, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
stdscr.addnstr(dims[0]/2+3+x, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
#}
stdscr.addstr(dims[0]/2-5+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
#}
elif selections == 2:#{
for x in range(0,selections+1):#{
for y in range(0,3):
stdscr.addnstr(dims[0]/2-selections*9/2+8*x+y, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
#}
stdscr.addstr(dims[0]/2-9+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
#}
elif selections > 2:#{
for x in range(0,selection+1):#{
for y in range(0,3):#{
stdscr.addnstr(dims[0]/2-selections*9/2+x*8+y, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
#}
#}
stdscr.addstr(dims[0]/2-selections*8/2+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
#}
for a in range(0,selections):#{
for y in range(0,6):#{
stdscr.addstr(dims[0]/2-selections*9/2+3+y+a*8, dims[1]/2-16, ' ', curses.A_REVERSE)
stdscr.addstr(dims[0]/2-selections*9/2+3+y+a*8, dims[1]/2+16, ' ', curses.A_REVERSE)
#}
#}
for b in range(0,selections):#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8*b+1, dims[1]/2-13,'--- --- --- - - --- --- ---')
stdscr.addstr(dims[0]/2-selections*9/2+5+8*b-1, dims[1]/2-13,'--- --- --- - - --- --- ---')
stdscr.addstr(dims[0]/2-selections*9/2+5+8*b+1, dims[1]/2,'-', curses.color_pair(9))
stdscr.addstr(dims[0]/2-selections*9/2+5+8*b-1, dims[1]/2,'-', curses.color_pair(9))
#}
if selections == 1:#{
if pos1 == 1:#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, ' - - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' ')
#}
elif pos1 == 2:#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, ' - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' ')
#}
elif pos1 == 3:#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, ' - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' '+str(elemArray[0][5])+' ')
#}
if pos1 == len(elemArray[0]):#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' - - - ')
#}
elif pos1 == len(elemArray[0])-1:#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' '+str(elemArray[0][pos1+1])+' - - ')
#}
elif pos1 == len(elemArray[0])-2:#{
stdscr.addstr(dims[0]/2, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' '+str(elemArray[0][pos1+1])+' '+str(elemArray[0][pos1+2])+' - ')
#}
#}
elif selections == 2:#{
if pos1 == 1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, ' - - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' ')
#}
elif pos1 == 2:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, ' - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' ')
#}
elif pos1 == 3:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, ' - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' '+str(elemArray[0][5])+' ')
#}
if pos2 == 1:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, ' - - - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' ')
#}
elif pos2 == 2:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, ' - - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' '+str(elemArray[1][4])+' ')
#}
elif pos2 == 3:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, ' - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' '+str(elemArray[1][4])+' '+str(elemArray[1][5])+' ')
#}
if pos1 == len(elemArray[0]):#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' - - - ')
#}
elif pos1 == len(elemArray[0])-1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' '+str(elemArray[0][pos1+1])+' - - ')
#}
elif pos1 == len(elemArray[0])-2:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-13, str(elemArray[0][pos1-3])+' '+str(elemArray[0][pos1-2])+' '+str(elemArray[0][pos1-1])+' '+str(elemArray[0][pos1])+' '+str(elemArray[0][pos1+1])+' '+str(elemArray[0][pos1+2])+' - ')
#}
if pos2 == len(elemArray[1]):#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, str(elemArray[1][pos2-3])+' '+str(elemArray[1][pos2-2])+' '+str(elemArray[1][pos2-1])+' '+str(elemArray[1][pos2])+' - - - ')
#}
elif pos2 == len(elemArray[1])-1:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, str(elemArray[1][pos2-3])+' '+str(elemArray[1][pos2-2])+' '+str(elemArray[1][pos2-1])+' '+str(elemArray[1][pos2])+' '+str(elemArray[1][pos2+1])+' - - ')
#}
elif pos2 == len(elemArray[1])-2:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-13, str(elemArray[1][pos2-3])+' '+str(elemArray[1][pos2-2])+' '+str(elemArray[1][pos2-1])+' '+str(elemArray[1][pos2])+' '+str(elemArray[1][pos2+1])+' '+str(elemArray[1][pos2+2])+' - ')
#}
#}
elif selections == 3:#{
if pos1 == 1:#{
stdscr.addstr(dims[0]/2-selections*9/2+5, dims[1]/2-13, ' - - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' ')
#}
elif pos1 == 2:#{
stdscr.addstr(dims[0]/2-selections*9/2+5, dims[1]/2-13, ' - - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' ')
#}
elif pos1 == 3:#{
stdscr.addstr(dims[0]/2-selections*9/2+5, dims[1]/2-13, ' - '+str(elemArray[0][0])+' '+str(elemArray[0][1])+' '+str(elemArray[0][2])+' '+str(elemArray[0][3])+' '+str(elemArray[0][4])+' '+str(elemArray[0][5])+' ')
#}
if pos2 == 1:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8, dims[1]/2-13, ' - - - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' ')
#}
elif pos2 == 2:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8, dims[1]/2-13, ' - - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' '+str(elemArray[1][4])+' ')
#}
elif pos2 == 3:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8, dims[1]/2-13, ' - '+str(elemArray[1][0])+' '+str(elemArray[1][1])+' '+str(elemArray[1][2])+' '+str(elemArray[1][3])+' '+str(elemArray[1][4])+' '+str(elemArray[1][5])+' ')
#}
if pos3 == 1:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8+8, dims[1]/2-13, ' - - - '+str(elemArray[2][0])+' '+str(elemArray[2][1])+' '+str(elemArray[2][2])+' '+str(elemArray[2][3])+' ')
#}
elif pos3 == 2:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8+8, dims[1]/2-13, ' - - '+str(elemArray[2][0])+' '+str(elemArray[2][1])+' '+str(elemArray[2][2])+' '+str(elemArray[2][3])+' '+str(elemArray[2][4])+' ')
#}
elif pos3 == 3:#{
stdscr.addstr(dims[0]/2-selections*9/2+5+8+8, dims[1]/2-13, ' - '+str(elemArray[2][0])+' '+str(elemArray[2][1])+' '+str(elemArray[2][2])+' '+str(elemArray[2][3])+' '+str(elemArray[2][4])+' '+str(elemArray[2][5])+' ')
#}
if pos1 == len(elemArray[0]):#{
print ('1')
#}
elif pos1 == len(elemArray[0])-1:#{
print ('1')
#}
elif pos1 == len(elemArray[0])-2:#{
print ('1')
#}
if pos2 == len(elemArray[1]):#{
print ('1')
#}
elif pos2 == len(elemArray[1])-1:#{
print ('1')
#}
elif pos2 == len(elemArray[1])-2:#{
print ('1')
#}
if pos3 == len(elemArray[2]):#{
print ('1')
#}
elif pos3 == len(elemArray[2])-1:#{
print ('1')
#}
elif pos3 == len(elemArray[2])-2:#{
print ('1')
#}
#}
elif selections == 4:#{
if pos1 == 1:#{
print ('1')
#}
elif pos1 == 2:#{
print ('1')
#}
elif pos1 == 3:#{
print ('1')
#}
if pos2 == 1:#{
print ('1')
#}
elif pos2 == 2:#{
print ('1')
#}
elif pos2 == 3:#{
print ('1')
#}
if pos3 == 1:#{
print ('1')
#}
elif pos3 == 2:#{
print ('1')
#}
elif pos3 == 3:#{
print ('1')
#}
if pos4 == 1:#{
print ('1')
#}
elif pos4 == 2:#{
print ('1')
#}
elif pos4 == 3:#{
print ('1')
#}
if pos1 == len(elemArray[0]):#{
print ('1')
#}
elif pos1 == len(elemArray[0])-1:#{
print ('1')
#}
elif pos1 == len(elemArray[0])-2:#{
print ('1')
#}
if pos2 == len(elemArray[1]):#{
print ('1')
#}
elif pos2 == len(elemArray[1])-1:#{
print ('1')
#}
elif pos2 == len(elemArray[1])-2:#{
print ('1')
#}
if pos3 == len(elemArray[2]):#{
print ('1')
#}
elif pos3 == len(elemArray[2])-1:#{
print ('1')
#}
elif pos3 == len(elemArray[2])-2:#{
print ('1')
#}
if pos4 == len(elemArray[3]):#{
print ('1')
#}
elif pos4 == len(elemArray[3])-1:#{
print ('1')
#}
elif pos4 == len(elemArray[3])-2:#{
print ('1')
#}
#}
input = stdscr.getch()
if input == curses.KEY_BACKSPACE:
stdscr.clear()
stdscr.refresh()
break
#}
#}
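# numberSelector(stdscr, L, currPos): arrow-key number picker bounded by the
# min/max values stored at the end of L. With len(L) == 6 it selects a single
# number, with len(L) == 8 a width/height pair (up/down switches rows). Enter
# returns the selection, backspace returns -1 (or [-1, -1]).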
def numberSelector(stdscr, L, currPos):#{
if len(L) == 6:#{
pos = currPos[0]
subject = L[0]
numbers = int(L[5])-int(L[4])+1
dims = stdscr.getmaxyx()
while True:#{
for x in range(0,3):#{
stdscr.addnstr(dims[0]/2-5+x, dims[1]/2-numbers*3/2, ornament2, (numbers)*3, curses.A_REVERSE)
stdscr.addnstr(dims[0]/2+3+x, dims[1]/2-numbers*3/2, ornament2, (numbers)*3, curses.A_REVERSE)
#}
stdscr.addstr(dims[0]/2-4, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
stdscr.addstr(dims[0]/2+1, dims[1]/2-9,'-- -- -- --')
stdscr.addstr(dims[0]/2-1, dims[1]/2-9,'-- -- -- --')
stdscr.addstr(dims[0]/2+1, dims[1]/2-1,'--', curses.color_pair(9))
stdscr.addstr(dims[0]/2-1, dims[1]/2-1,'--', curses.color_pair(9))
for y in range(0,6):#{
stdscr.addstr(dims[0]/2-2+y, dims[1]/2-12, ' ', curses.A_REVERSE)
stdscr.addstr(dims[0]/2-2+y, dims[1]/2+12, ' ', curses.A_REVERSE)
#}
if pos == int(L[4]):#{
stdscr.addstr(dims[0]/2, dims[1]/2-9, '-- -- '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
#}
elif pos == int(L[4])+1:#{
stdscr.addstr(dims[0]/2, dims[1]/2-9, '-- '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
#}
elif pos == int(L[5])-1:#{
stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' --')
stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
#}
elif pos == int(L[5]):#{
stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' -- --')
stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
#}
else:#{
stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
#}
stdscr.addstr(dims[0]/2+4, dims[1]/2-len('Press "h" for help')/2, 'Press "h" for help', curses.A_REVERSE)
input=stdscr.getch()
if input == curses.KEY_LEFT and pos > int(L[4]):#{
pos-=1
#}
elif input == curses.KEY_RIGHT and pos < int(L[5]):#{
pos+=1
#}
elif input == ord('h'):
stdscr.addstr(dims[0]-2, dims[1]/2-len(L[1])/2, L[1])
elif input == 10:#{
stdscr.clear()
stdscr.refresh()
return pos
break
#}
elif input == curses.KEY_BACKSPACE:#{
pos = -1
stdscr.clear()
stdscr.refresh()
return pos
break
#}
#}
elif len(L) == 8:#{
pos1 = currPos[0]
pos2 = currPos[1]
posv = 1
header = L[0]
numbers = int(L[7])-int(L[6])+1
dims = stdscr.getmaxyx()
while True:#{
for y in range(0,3):#{
for x in range(0,3):#{
stdscr.addnstr(dims[0]/2-9+x+8*y, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
#}
#}
stdscr.addstr(dims[0]/2-8, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
for c in range (0,2):#{
stdscr.addstr(dims[0]/2-4+1+c*8, dims[1]/2-9,'-- -- -- -- --')
stdscr.addstr(dims[0]/2-4-1+c*8, dims[1]/2-9,'-- -- -- -- --')
if posv == 1 and c == 0:#{
stdscr.addstr(dims[0]/2-4+1+c*8, dims[1]/2-1,'--', curses.color_pair(9))
stdscr.addstr(dims[0]/2-4-1+c*8, dims[1]/2-1,'--', curses.color_pair(9))
#}
elif posv == 2 and c == 1:#{
stdscr.addstr(dims[0]/2-4+1+c*8, dims[1]/2-1,'--', curses.color_pair(9))
stdscr.addstr(dims[0]/2-4-1+c*8, dims[1]/2-1,'--', curses.color_pair(9))
#}
#}
for d in range(0,2):#{
for y in range(0,6):#{
stdscr.addstr(dims[0]/2-6+y+d*8, dims[1]/2-12, ' ', curses.A_REVERSE)
stdscr.addstr(dims[0]/2-6+y+d*8, dims[1]/2+12, ' ', curses.A_REVERSE)
#}
#}
if pos1 == int(L[4]):#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-9, '-- -- '+' '+' '+str(pos1+1).rjust(2,'-')+' '+str(pos1+2).rjust(2,'-'))
if posv == 1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-1, str(pos1).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos1 == int(L[4])+1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-9, '-- '+str(pos1-1).rjust(2,'-')+' '+' '+' '+str(pos1+1).rjust(2,'-')+' '+str(pos1+2).rjust(2,'-'))
if posv == 1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-1, str(pos1).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos1 == int(L[5])-1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-9, str(pos1-2).rjust(2,'-')+' '+str(pos1-1).rjust(2,'-')+' '+' '+' '+str(pos1+1).rjust(2,'-')+' --')
if posv == 1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-1, str(pos1).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos1 == int(L[5]):#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-9, str(pos1-2).rjust(2,'-')+' '+str(pos1-1).rjust(2,'-')+' '+' '+' -- --')
if posv == 1:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-1, str(pos1).rjust(2,' '), curses.color_pair(9))
#}
#}
else:#{
stdscr.addstr(dims[0]/2-4, dims[1]/2-9, str(pos1-2).rjust(2,'-')+' '+str(pos1-1).rjust(2,'-')+' '+' '+' '+str(pos1+1).rjust(2,'-')+' '+str(pos1+2).rjust(2,'-'))
if posv == 1:
stdscr.addstr(dims[0]/2-4, dims[1]/2-1, str(pos1).rjust(2,'-'), curses.color_pair(9))
#}
#}
if pos2 == int(L[6]):#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-9, '-- -- '+' '+' '+str(pos2+1).rjust(2,'-')+' '+str(pos2+2).rjust(2,'-'))
if posv == 2:
stdscr.addstr(dims[0]/2+4, dims[1]/2-1, str(pos2).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos2 == int(L[6])+1:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-9, '-- '+str(pos2-1).rjust(2,'-')+' '+' '+' '+str(pos2+1).rjust(2,'-')+' '+str(pos2+2).rjust(2,'-'))
if posv == 2:
stdscr.addstr(dims[0]/2+4, dims[1]/2-1, str(pos2).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos2 == int(L[7])-1:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-9, str(pos2-2).rjust(2,'-')+' '+str(pos2-1).rjust(2,'-')+' '+' '+' '+str(pos2+1).rjust(2,'-')+' --')
if posv == 2:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-1, str(pos2).rjust(2,'-'), curses.color_pair(9))
#}
#}
elif pos2 == int(L[7]):#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-9, str(pos2-2).rjust(2,'-')+' '+str(pos2-1).rjust(2,'-')+' '+' '+' -- --')
if posv == 2:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-1, str(pos2).rjust(2,'-'), curses.color_pair(9))
#}
#}
else:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-9, str(pos2-2).rjust(2,'-')+' '+str(pos2-1).rjust(2,'-')+' '+' '+' '+str(pos2+1).rjust(2,'-')+' '+str(pos2+2).rjust(2,'-'))
if posv == 2:#{
stdscr.addstr(dims[0]/2+4, dims[1]/2-1, str(pos2).rjust(2,'-'), curses.color_pair(9))
#}
#}
stdscr.addstr(dims[0]/2+8, dims[1]/2-len('Press "h" for help')/2, 'Press "h" for help', curses.A_REVERSE)
input=stdscr.getch()
if input == curses.KEY_LEFT and pos1 > int(L[4]) and posv == 1:#{
pos1-=1
#}
elif input == curses.KEY_RIGHT and pos1 < int(L[5]) and posv == 1:#{
pos1+=1
#}
if input == curses.KEY_LEFT and pos2 > int(L[6]) and posv == 2:#{
pos2-=1
#}
elif input == curses.KEY_RIGHT and pos2 < int(L[7]) and posv == 2:#{
pos2+=1
#}
if input == curses.KEY_UP and posv == 2:#{
posv-=1
#}
elif input == curses.KEY_DOWN and posv == 1:#{
posv+=1
#}
elif input == ord('h'):
stdscr.addstr(dims[0]-2, dims[1]/2-len(L[1])/2, L[1])
elif input == 10:#{
pos = [pos1,pos2]
stdscr.clear()
stdscr.refresh()
return pos
break
#}
elif input == curses.KEY_BACKSPACE:#{
pos = [-1,-1]
stdscr.clear()
stdscr.refresh()
return pos
break
#}
#}
#}
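# Menu and selector definitions: element 0 is the title, element 1 the help
# line, the remaining elements are the options. sizeselector is extended at
# runtime (in main) with the min/max bounds for each board dimension, and
# elemArray holds the glyph choices offered for alive and dead cells.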
gamemenu=['GAME OF LIFE','Arrows to select. Enter to submit. Exit with backspace','Start the Game','Settings','Exit']
optionsmenu=['SETTINGS','Arrows to select. Enter to submit. Exit with backspace', 'Board dimensions', 'Cell graphics', 'Rules']
initgame=['GAMEMODE', 'Arrows to select. Enter to submit. Exit with backspace', 'Custom start', 'Select an example']
samples=['LIST OF SAMPLES','Arrows to select. Enter to submit. Exit with backspace', 'Pentadecathlon (repeats after 15 steps)', 'Pulsar (repeats after 3 steps)', 'Lightweight spaceship (LWSS, repeats never)', 'Blinker (repeats after 2 steps)', 'Toad (repeats after 2 steps)']
sizeselector=['SELECT THE SIZE OF LIFE', 'Arrows to select. Enter to submit. Exit with backspace', 'OK', 'CANCEL',]
charselector=['PUT SOME MAKEUP ON DEM CELLS','Arrows to select. Enter to submit. Cancel wih backspace','Alive cells','Dead cells']
elemArray=[['+','-','*','/','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','1','2','3','4','5','6','7','8','9','0','?','!','<','>'],['O','0','o','D','C','G']]
rules=['RULES','Keys 1-3 for rules 1-3. Cancel with "Backspace"','Original rules','Rule 2','Rule 3']
ornament1='======================================================================================================='
ornament2=' '
ornament3='-------------------------------------------------------------------------------------------------------'
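# Illustrative sketch only: no board-update rule is implemented anywhere in
# this file yet, even though the menus refer to the original rules. stepBoard
# is a hypothetical helper showing how Conway's B3/S23 rule could be applied
# to the boardArray layout that drawBoard renders, assuming cells are stored
# as 0 (dead) and 1 (alive).
def stepBoard(boardArray):#{
    rows = len(boardArray)
    cols = len(boardArray[0])
    newBoard = [[0 for j in range(0,cols)] for i in range(0,rows)]
    for i in range(0,rows):#{
        for j in range(0,cols):#{
            neighbours = 0
            for di in (-1,0,1):#{
                for dj in (-1,0,1):#{
                    if (di != 0 or dj != 0) and 0 <= i+di < rows and 0 <= j+dj < cols:#{
                        neighbours += boardArray[i+di][j+dj]
                    #}
                #}
            #}
            if boardArray[i][j] == 1 and (neighbours == 2 or neighbours == 3):#{
                newBoard[i][j] = 1
            #}
            elif boardArray[i][j] == 0 and neighbours == 3:#{
                newBoard[i][j] = 1
            #}
        #}
    #}
    return newBoard
#}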
if __name__ == '__main__':#{
wrapper(main)
#}