# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from keystone.common import dependency
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _
from keystone.token import provider
from keystone.token.providers import common
from keystone.token.providers.fernet import token_formatters as tf
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('trust_api')
class Provider(common.BaseProvider):
def __init__(self, *args, **kwargs):
super(Provider, self).__init__(*args, **kwargs)
self.token_formatter = tf.TokenFormatter()
def needs_persistence(self):
"""Should the token be written to a backend."""
return False
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
"""Issue a V2 formatted token.
:param token_ref: reference describing the token
:param roles_ref: reference describing the roles for the token
:param catalog_ref: reference describing the token's catalog
:returns: tuple containing the ID of the token and the token data
"""
# TODO(lbragstad): Currently, Fernet tokens don't support bind in the
# token format. Raise a 501 if we're dealing with bind.
if token_ref.get('bind'):
raise exception.NotImplemented()
user_id = token_ref['user']['id']
        # Default to 'password' since the authentication methods are not
        # provided by token_ref.
method_names = ['password']
project_id = None
# Verify that tenant is not None in token_ref
if token_ref.get('tenant'):
project_id = token_ref['tenant']['id']
parent_audit_id = token_ref.get('parent_audit_id')
# If parent_audit_id is defined then a token authentication was made
if parent_audit_id:
method_names.append('token')
audit_ids = provider.audit_info(parent_audit_id)
# Get v3 token data and exclude building v3 specific catalog. This is
# due to the fact that the V2TokenDataHelper.format_token() method
# doesn't build any of the token_reference from other Keystone APIs.
# Instead, it builds it from what is persisted in the token reference.
# Here we are going to leverage the V3TokenDataHelper.get_token_data()
# method written for V3 because it goes through and populates the token
# reference dynamically. Once we have a V3 token reference, we can
# attempt to convert it to a V2 token response.
v3_token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
project_id=project_id,
token=token_ref,
include_catalog=False,
audit_info=audit_ids)
expires_at = v3_token_data['token']['expires_at']
token_id = self.token_formatter.create_token(user_id, expires_at,
audit_ids,
methods=method_names,
project_id=project_id)
# Convert v3 to v2 token data and build v2 catalog
token_data = self.v2_token_data_helper.v3_to_v2_token(token_id,
v3_token_data)
return token_id, token_data
def _build_federated_info(self, token_data):
"""Extract everything needed for federated tokens.
This dictionary is passed to the FederatedPayload token formatter,
which unpacks the values and builds the Fernet token.
"""
group_ids = token_data.get('user', {}).get(
federation.FEDERATION, {}).get('groups')
idp_id = token_data.get('user', {}).get(
federation.FEDERATION, {}).get('identity_provider', {}).get('id')
protocol_id = token_data.get('user', {}).get(
federation.FEDERATION, {}).get('protocol', {}).get('id')
if not group_ids:
group_ids = list()
federated_dict = dict(group_ids=group_ids, idp_id=idp_id,
protocol_id=protocol_id)
return federated_dict
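    # Example (illustrative only) of the dictionary returned above for a
    # federated user with one mapped group; the identifiers are hypothetical:
    #
    #     {'group_ids': [{'id': 'abc123'}],
    #      'idp_id': 'acme_idp',
    #      'protocol_id': 'saml2'}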
def _rebuild_federated_info(self, federated_dict, user_id):
"""Format federated information into the token reference.
The federated_dict is passed back from the FederatedPayload token
formatter. The responsibility of this method is to format the
information passed back from the token formatter into the token
reference before constructing the token data from the
V3TokenDataHelper.
"""
g_ids = federated_dict['group_ids']
idp_id = federated_dict['idp_id']
protocol_id = federated_dict['protocol_id']
federated_info = dict(groups=g_ids,
identity_provider=dict(id=idp_id),
protocol=dict(id=protocol_id))
token_dict = {'user': {federation.FEDERATION: federated_info}}
token_dict['user']['id'] = user_id
token_dict['user']['name'] = user_id
return token_dict
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
trust=None, metadata_ref=None, include_catalog=True,
parent_audit_id=None):
"""Issue a V3 formatted token.
Here is where we need to detect what is given to us, and what kind of
token the user is expecting. Depending on the outcome of that, we can
pass all the information to be packed to the proper token format
handler.
:param user_id: ID of the user
:param method_names: method of authentication
:param expires_at: token expiration time
:param project_id: ID of the project being scoped to
:param domain_id: ID of the domain being scoped to
:param auth_context: authentication context
:param trust: ID of the trust
:param metadata_ref: metadata reference
:param include_catalog: return the catalog in the response if True,
otherwise don't return the catalog
:param parent_audit_id: ID of the parent audit entity
:returns: tuple containing the id of the token and the token data
"""
# TODO(lbragstad): Currently, Fernet tokens don't support bind in the
# token format. Raise a 501 if we're dealing with bind.
        if auth_context and auth_context.get('bind'):
raise exception.NotImplemented()
token_ref = None
# NOTE(lbragstad): This determines if we are dealing with a federated
# token or not. The groups for the user will be in the returned token
# reference.
federated_dict = None
if auth_context and self._is_mapped_token(auth_context):
token_ref = self._handle_mapped_tokens(
auth_context, project_id, domain_id)
federated_dict = self._build_federated_info(token_ref)
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
auth_context.get('extras') if auth_context else None,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
trust=trust,
bind=auth_context.get('bind') if auth_context else None,
token=token_ref,
include_catalog=include_catalog,
audit_info=parent_audit_id)
token = self.token_formatter.create_token(
user_id,
token_data['token']['expires_at'],
token_data['token']['audit_ids'],
methods=method_names,
domain_id=domain_id,
project_id=project_id,
trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
federated_info=federated_dict)
return token, token_data
def validate_v2_token(self, token_ref):
"""Validate a V2 formatted token.
:param token_ref: reference describing the token to validate
:returns: the token data
:raises keystone.exception.Unauthorized: if v3 token is used
"""
(user_id, methods,
audit_ids, domain_id,
project_id, trust_id,
federated_info, created_at,
expires_at) = self.token_formatter.validate_token(token_ref)
if trust_id or domain_id or federated_info:
msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
'domain, or federated tokens.')
raise exception.Unauthorized(msg)
v3_token_data = self.v3_token_data_helper.get_token_data(
user_id,
methods,
project_id=project_id,
expires=expires_at,
issued_at=created_at,
token=token_ref,
include_catalog=False,
audit_info=audit_ids)
return self.v2_token_data_helper.v3_to_v2_token(token_ref,
v3_token_data)
def validate_v3_token(self, token):
"""Validate a V3 formatted token.
:param token: a string describing the token to validate
:returns: the token data
:raises keystone.exception.Unauthorized: if token format version isn't
supported
"""
(user_id, methods, audit_ids, domain_id, project_id, trust_id,
federated_info, created_at, expires_at) = (
self.token_formatter.validate_token(token))
token_dict = None
trust_ref = None
if federated_info:
token_dict = self._rebuild_federated_info(federated_info, user_id)
if trust_id:
trust_ref = self.trust_api.get_trust(trust_id)
return self.v3_token_data_helper.get_token_data(
user_id,
method_names=methods,
domain_id=domain_id,
project_id=project_id,
issued_at=created_at,
expires=expires_at,
trust=trust_ref,
token=token_dict,
audit_info=audit_ids)
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
:raises keystone.exception.NotImplemented: when called
"""
raise exception.NotImplemented()
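
# --- Illustrative sketch (not part of Keystone) -------------------------------
# The TokenFormatter used above builds on the Fernet symmetric-encryption
# primitive from the `cryptography` package. A minimal round-trip with that
# primitive looks roughly like the following; the key handling and payload are
# assumptions for illustration only (Keystone loads keys from a key repository
# and packs a structured payload rather than raw bytes):
#
#     from cryptography.fernet import Fernet
#
#     key = Fernet.generate_key()
#     f = Fernet(key)
#     token = f.encrypt(b'token payload')   # opaque, URL-safe token string
#     assert f.decrypt(token) == b'token payload'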
|
|
"""ZeroconfMessaging is a module to make local multicasted, multithreaded,
queued unordered, reliable messaging simpler than you can imagine. Peer
discovery happens automatically. Naming conflicts are solved automatically.
Forget IGMP and special hardware requirements.
Uses zeroconf networking.
Message size is limited to 255 bytes.
"""
import copy
import select
import pybonjour
import cPickle
import Queue
import socket
import threading
import time
# The protocol version is stored automatically in the primary TXT record and
# associated with a "textvers" key, as is the convention in Bonjour/zeroconf.
# Define exceptions.
class ZeroconfMessagingError(Exception): pass
class InvalidCallbackError(ZeroconfMessagingError): pass
class MessageTooLargeError(ZeroconfMessagingError): pass
class ProtocolVersionMismatch(ZeroconfMessagingError): pass
# Copied from django.utils.functional
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
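# Example (illustrative): curry(f, a)(b) is equivalent to f(a, b).
#
#     add = lambda a, b: a + b
#     add_one = curry(add, 1)
#     assert add_one(2) == 3
#     assert curry(add, b=10)(5) == 15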
class ZeroconfMessaging(threading.Thread):
def __init__(self, serviceName, serviceType, port, protocolVersion=1,
serviceRegisteredCallback=None,
serviceRegistrationFailedCallback=None,
serviceUnregisteredCallback=None,
peerServiceDiscoveryCallback=None,
peerServiceRemovalCallback=None,
peerServiceUpdateCallback=None,
peerServiceDescriptionUpdatedCallback=None):
super(ZeroconfMessaging, self).__init__(name='ZeroconfMessaging-Thread')
# Ensure the callbacks are valid.
if not callable(serviceRegisteredCallback):
raise InvalidCallbackError, "service registered callback"
if not callable(serviceRegistrationFailedCallback):
raise InvalidCallbackError, "service registration failed callback"
if not callable(serviceUnregisteredCallback):
raise InvalidCallbackError, "service unregistered callback"
if not callable(peerServiceDiscoveryCallback):
raise InvalidCallbackError, "peer service discovery callback"
if not callable(peerServiceRemovalCallback):
raise InvalidCallbackError, "peer service removal callback"
if not callable(peerServiceUpdateCallback):
raise InvalidCallbackError, "peer service update callback"
if not callable(peerServiceDescriptionUpdatedCallback):
raise InvalidCallbackError, "peer service description updated callback"
# If port is set to None, then pick a random free port automatically.
if port is None:
self.socket = socket.socket()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', 0))
(ip, port) = self.socket.getsockname()
# Callbacks.
self.serviceRegisteredCallback = serviceRegisteredCallback
self.serviceRegistrationFailedCallback = serviceRegistrationFailedCallback
self.serviceUnregisteredCallback = serviceUnregisteredCallback
self.peerServiceDiscoveryCallback = peerServiceDiscoveryCallback
self.peerServiceRemovalCallback = peerServiceRemovalCallback
self.peerServiceUpdateCallback = peerServiceUpdateCallback
self.peerServiceDescriptionUpdatedCallback = peerServiceDescriptionUpdatedCallback
# Metadata.
self.serviceName = serviceName
self.serviceType = serviceType
self.protocolVersion = protocolVersion
self.port = port
# Message relaying.
self.inbox = Queue.Queue()
self.outbox = Queue.Queue()
# Mutual exclusion.
self.lock = threading.Condition()
# Metadata for the ZeroconfMessaging implementation.
self.txtRecords = {}
# Metadata about peers.
self.peers = {}
self.peersTxtRecords = {}
self.peersTxtRecordsUpdatedSinceLastCallback = {}
self.peersTxtRecordsDeletedSinceLastCallback = {}
# State variables.
self.serverReady = False
self.clientReady = False
self.alive = True
self.die = False
# Service descriptor references.
self.sdRefServer = None # A single sdRef to send messages.
self.sdRefBrowse = None # A single sdRef to discover peers.
self.sdRefSingleShots = [] # A list of sdRefs that need to return something just once.
self.sdRefTXTRecordQueries = [] # A list of sdRefs that are "long-lived", always awaiting new/updated TXT records.
def run(self):
# Register.
self._register()
# Browse.
self._browse()
while self.alive:
# Process responses of the zeroconf server (register, browse,
# resolve, query callbacks).
self._processResponses()
# When registration has been completed:
if self.serverReady:
# 1) send multicast messages waiting in the outbox.
self._send()
# 2) receive multicast messages.
# Messages are received through _queryTXTRecordCallback() and
# are put in the inbox directly from there.
# Commit suicide when asked to.
with self.lock:
if self.die and self.outbox.qsize() == 0:
self._commitSuicide()
# Processing the queues 50 times per second is sufficient.
time.sleep(0.02)
def kill(self):
# Let the thread know it should commit suicide.
with self.lock:
self.die = True
def _register(self):
"""Register the DNS service, along with a primary TXT record, which
will contain the protocol version.
Must only be called once.
"""
primaryTxtRecord = pybonjour.TXTRecord()
primaryTxtRecord['textvers'] = self.protocolVersion
self.sdRefServer = pybonjour.DNSServiceRegister(name = self.serviceName,
regtype = self.serviceType,
port = self.port,
txtRecord = primaryTxtRecord,
callBack = self._serviceRegisteredCallback)
def _browse(self):
"""Browse to find hosts that offer the same service.
Must only be called once, because it will continue indefinitely.
"""
self.sdRefBrowse = pybonjour.DNSServiceBrowse(regtype = self.serviceType,
callBack = self._browseCallback)
self.clientReady = True
def _browseCallback(self, sdRef, flags, interfaceIndex, errorCode, serviceName, regtype, replyDomain):
if errorCode != pybonjour.kDNSServiceErr_NoError:
return
else:
# TODO: add optional error callback?
pass
# Discovering our own service doesn't count.
if serviceName == self.serviceName:
return
# Rediscovering an already discovered service (e.g. due to a new or
# updated TXT record) on the same interface also doesn't count.
if serviceName in self.peers.keys() and interfaceIndex in self.peers[serviceName].keys():
return
# If no service is being added, then one is being removed.
if not (flags & pybonjour.kDNSServiceFlagsAdd):
self.peerServiceRemovalCallback(serviceName, interfaceIndex)
del self.peers[serviceName][interfaceIndex]
if len(self.peers[serviceName]) == 0:
del self.peers[serviceName]
return
# Create curried callbacks so we can pass additional data to the
# resolve callback.
curriedCallback = curry(self._resolveCallback,
serviceName = serviceName)
# We've found a peer with the same service, but now we still have to
# determine the details: full name, host target, port, primary
# TXT record and IP address.
sdRef = pybonjour.DNSServiceResolve(0,
interfaceIndex,
serviceName,
regtype,
replyDomain,
curriedCallback)
self.sdRefSingleShots.append(sdRef)
def _resolveCallback(self, sdRef, flags, interfaceIndex, errorCode, fullname, hosttarget, port, txtRecord, serviceName):
"""Callback for DNSServiceResolve(). serviceName should be curried."""
if errorCode != pybonjour.kDNSServiceErr_NoError:
return
else:
# TODO: add optional error callback?
pass
# Only changes in either of these will result in updated service
# metadata, and the associated peerServiceUpdateCallback callback.
updatedServiceKeys = ['fullname', 'hosttarget', 'port']
metadata = {
'serviceName' : serviceName,
'fullname' : fullname,
'hosttarget' : hosttarget,
'port' : port,
}
# Store metadata.
if not serviceName in self.peers.keys():
self.peers[serviceName] = {}
# Initial resolve: new service: store metadata and look up the IP
# address.
if interfaceIndex not in self.peers[serviceName].keys():
self.peers[serviceName][interfaceIndex] = metadata
# Create a curried callback so we can pass additional data to the
# (single-shot) A query record callback.
curriedCallback = curry(self._queryARecordCallback,
serviceName = serviceName,
hosttarget = hosttarget,
port = port)
# Retrieve the IP address by querying the peer's A record.
sdRef = pybonjour.DNSServiceQueryRecord(interfaceIndex = interfaceIndex,
fullname = hosttarget,
rrtype = pybonjour.kDNSServiceType_A,
callBack = curriedCallback)
self.sdRefSingleShots.append(sdRef)
# Create a curried callback so we can pass additional data to the
# (long-lived) TXT query record callback.
curriedCallback = curry(self._queryTXTRecordCallback,
serviceName = serviceName,
hosttarget = hosttarget, # TRICKY: A record has name like "_http._tcp.local", hence use hosttarget.
port = port)
# Monitor this service's TXT records.
sdRef = pybonjour.DNSServiceQueryRecord(interfaceIndex = interfaceIndex,
flags = pybonjour.kDNSServiceFlagsLongLivedQuery,
fullname = fullname, # TRICKY: TXT record has name like "My Web Server._http._tcp.local", hence use fullname.
rrtype = pybonjour.kDNSServiceType_TXT,
callBack = curriedCallback)
self.sdRefTXTRecordQueries.append(sdRef)
# Secondary resolves: updated service or simply different txtRecords.
# Ignore multiple resolves for the same service (this happens when
# a service has multiple TXT records, see
# http://code.google.com/p/pybonjour/issues/detail?id=2).
else:
# Only certain changes in metadata will result in an update. Build
# dictionaries containing just these values.
curMetadata = {}
newMetadata = {}
for key in updatedServiceKeys:
                curMetadata[key] = self.peers[serviceName][interfaceIndex][key]
                newMetadata[key] = metadata[key]
# If the metadata differs: updated service.
if curMetadata != newMetadata:
for key in updatedServiceKeys:
self.peers[serviceName][interfaceIndex][key] = metadata[key]
                # NOTE: the IP address was stored by _queryARecordCallback() during the initial resolve.
                ip = self.peers[serviceName][interfaceIndex].get('ip')
                self.peerServiceUpdateCallback(serviceName, interfaceIndex, fullname, hosttarget, ip, port, txtRecord)
def _queryARecordCallback(self, sdRef, flags, interfaceIndex, errorCode, fullname, rrtype, rrclass, rdata, ttl, serviceName, hosttarget, port):
"""Callback for DNSServiceQueryRecord(). serviceName, hosttarget and
port should be curried.
"""
if errorCode == pybonjour.kDNSServiceErr_NoError:
# We've now got *all* information about the peer with the same
# service. Time to call the callback.
ip = socket.inet_ntoa(rdata)
if not serviceName in self.peers.keys():
self.peers[serviceName] = {}
self.peers[serviceName][interfaceIndex] = {
'serviceName' : serviceName,
'fullname' : fullname,
'hosttarget' : hosttarget,
'ip' : ip,
'port' : port,
}
self.peerServiceDiscoveryCallback(serviceName, interfaceIndex, fullname, hosttarget, ip, port)
else:
# TODO: add optional error callback?
pass
def _queryTXTRecordCallback(self, sdRef, flags, interfaceIndex, errorCode, fullname, rrtype, rrclass, rdata, ttl, serviceName, hosttarget, port):
# Parse the data directly, without using pybonjour.TXTRecord. The code
# would be uglier and less efficient, because we always store a single
# key-value pair, whereas the specification allows for multiple pairs.
length = ord(rdata[0])
key, value = rdata[1:length+1].split('=', 1)
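        # Example (illustrative): for a TXT record holding "status=playing",
        # rdata is "\x0estatus=playing" -- a single length byte (14) followed
        # by the key=value string.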
# When the TTL is zero, a record has been "removed". In reality, this
# is only broadcast when a record has been *updated*, not removed
# (when a record is removed, nothing is broadcast). The new value is
        # included in the same broadcast and therefore this callback function
# will be called again for this TXT record, but then containing the
# updated value. Hence we can ignore this callback.
if ttl == 0:
return
# When the key is "textvers", verify the protocol version.
if key == 'textvers':
if str(value) != str(self.protocolVersion):
# Remove this peer since it doesn't have a matching protocol
# version anyway.
self.sdRefTXTRecordQueries.remove(sdRef)
del self.peers[serviceName]
                raise ProtocolVersionMismatch, "Removed peer '%s' due to protocol version mismatch. Own protocol version: %s, other protocol version: %s." % (serviceName, self.protocolVersion, value)
return
# Keep track of all TXT records.
if serviceName not in self.peersTxtRecords.keys():
self.peersTxtRecords[serviceName] = {}
self.peersTxtRecordsUpdatedSinceLastCallback[serviceName] = {}
self.peersTxtRecordsDeletedSinceLastCallback[serviceName] = {}
if interfaceIndex not in self.peersTxtRecords[serviceName].keys():
self.peersTxtRecords[serviceName][interfaceIndex] = {}
self.peersTxtRecordsUpdatedSinceLastCallback[serviceName][interfaceIndex] = []
self.peersTxtRecordsDeletedSinceLastCallback[serviceName][interfaceIndex] = []
# When the value is 'DELETE', delete the corresponding key from the
# TXT records. Else, unpickle the value and update our local mirror
# of the peer's TXT records (and remember which records have been
# updated, so we can send a single callback for multiple changes).
if value == 'DELETE':
if serviceName in self.peersTxtRecords.keys():
if interfaceIndex in self.peersTxtRecords[serviceName].keys():
if key in self.peersTxtRecords[serviceName][interfaceIndex].keys():
del self.peersTxtRecords[serviceName][interfaceIndex][key]
self.peersTxtRecordsDeletedSinceLastCallback[serviceName][interfaceIndex].append(key)
# Else, this is either a new or updated key-value pair. Mark the key
# as having an update and store the pickled value.
else:
self.peersTxtRecordsUpdatedSinceLastCallback[serviceName][interfaceIndex].append(key)
self.peersTxtRecords[serviceName][interfaceIndex][key] = cPickle.loads(value)
# Only put messages in the inbox when no more TXT record changes are
# coming from this service/interface combo.
if not (flags & pybonjour.kDNSServiceFlagsMoreComing):
# Get the TXT records and the keys of the updated and deleted TXT
# records.
txtRecords = self.peersTxtRecords[serviceName][interfaceIndex]
updated = self.peersTxtRecordsUpdatedSinceLastCallback[serviceName][interfaceIndex]
deleted = self.peersTxtRecordsDeletedSinceLastCallback[serviceName][interfaceIndex]
# Erase the lists of keys of updated and deleted TXT records for
# the next time.
self.peersTxtRecordsUpdatedSinceLastCallback[serviceName][interfaceIndex] = []
self.peersTxtRecordsDeletedSinceLastCallback[serviceName][interfaceIndex] = []
# Send the callback, if it is callable.
if callable(self.peerServiceDescriptionUpdatedCallback):
self.peerServiceDescriptionUpdatedCallback(serviceName, interfaceIndex, copy.deepcopy(txtRecords), updated, deleted)
# Update the inbox. Only send the message itself, not the details.
with self.lock:
message = [{header : txtRecords[header]} for header in updated]
if len(message):
self.inbox.put(message)
self.lock.notifyAll()
def _processResponses(self):
# Process responses for server (i.e. registration callback, TXT record
# updates).
if self.sdRefServer is not None:
self._processResponsesForService(self.sdRefServer)
# Process responses for client (i.e. detecting peers with a matching
# service type and peers' updated TXT records).
if self.serverReady and self.clientReady and self.sdRefBrowse is not None:
self._processResponsesForService(self.sdRefBrowse)
# Process responses for one shot service descriptor references (i.e.
# resolve and A record query callbacks). These service descriptor
# references must be closed as soon as we get input (hence "one shot").
        # Iterate over a copy of the list so that items can be removed from the
        # original list safely while looping.
        for sdRef in self.sdRefSingleShots[:]:
            if self._processResponsesForService(sdRef):
                self.sdRefSingleShots.remove(sdRef)
                sdRef.close()
# Process responses for "long-lived" service descriptor references
# (i.e. TXT record query callbacks).
for sdRef in self.sdRefTXTRecordQueries:
self._processResponsesForService(sdRef)
def _processResponsesForService(self, sdRef):
"""Helper function for _procesResponses(). Returns True when input was
ready, False otherwise."""
# If there's input waiting to be processed, process it. Only wait
# for input for 0.05 seconds (avoid blocking).
inputReady, outputReady, exceptReady = select.select([sdRef], [], [], 0.05)
if sdRef in inputReady:
pybonjour.DNSServiceProcessResult(sdRef)
return True
return False
def _serviceRegisteredCallback(self, sdRef, flags, errorCode, name, regtype, domain):
# Call the appropriate external callback.
if errorCode == pybonjour.kDNSServiceErr_NoError:
# Update our own service name.
self.serviceName = name
self.serverReady = True
# Call the service registration callback.
self.serviceRegisteredCallback(sdRef, flags, errorCode, name, regtype, domain, self.port)
else:
# Call the error callback, and pass it the error message.
errorMessage = pybonjour.BonjourError._errmsg[errorCode]
self.serviceRegistrationFailedCallback(sdRef, flags, errorCode, errorMessage, name, regtype, domain)
def _send(self):
"""Send all messages waiting to be sent in the outbox."""
with self.lock:
while self.outbox.qsize() > 0:
self._sendMessage(self.outbox.get())
def _sendMessage(self, message):
"""Helper method for _send()."""
newTxtRecords = {}
for key, value in message.items():
serializedValue = cPickle.dumps(value)
# Validate TXT record size: it should never exceed 255 bytes.
if len(key) + len('=') + len(serializedValue) > 255:
raise MessageTooLargeError, "message size is: %d for key '%s'" % (len(key) + len('=') + len(serializedValue), key)
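            # Example (illustrative): for the key 'status', a pickled value of
            # up to 248 bytes fits (6 + 1 + 248 = 255); a 249-byte value would
            # raise MessageTooLargeError.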
txt = pybonjour.TXTRecord({key : serializedValue})
newTxtRecords[key] = {'value' : value, 'txtRecord' : txt}
# Make sets out of the keys of the TXT records to make it easier to
# determine what should happen.
curKeys = set(self.txtRecords.keys())
newKeys = set(newTxtRecords.keys())
# Update: intersection of current and new TXT records, plus a value
# comparison to ensure we only update when the value actually changed.
for key in curKeys.intersection(newKeys):
if self.txtRecords[key]['value'] != newTxtRecords[key]['value']:
# print "\tUpdating:", key
pybonjour.DNSServiceUpdateRecord(sdRef = self.sdRefServer,
RecordRef = self.txtRecords[key]['recordReference'],
rdata = newTxtRecords[key]['txtRecord'])
# Update the stored TXT record.
self.txtRecords[key]['txtRecord'] = newTxtRecords[key]['txtRecord']
self.txtRecords[key]['value'] = newTxtRecords[key]['value']
# Remove: difference of current with new TXT records.
for key in curKeys.difference(newKeys):
# print "\tRemoving: ", key
# A removed record doesn't get broadcast. So first update the
# record's value to the string 'DELETE'. This is our way of
# telling that this TXT record will be deleted.
pybonjour.DNSServiceUpdateRecord(sdRef = self.sdRefServer,
RecordRef = self.txtRecords[key]['recordReference'],
rdata = pybonjour.TXTRecord({key : 'DELETE'}))
# Now actually remove the record.
# TRICKY: this doesn't have to ever happen. See the above comment.
# pybonjour.DNSServiceRemoveRecord(sdRef = self.sdRefServer,
# RecordRef = self.txtRecords[key]['recordReference'])
# Remove the stored TXT record.
del self.txtRecords[key]
# Add: difference of new with current TXT records.
for key in newKeys.difference(curKeys):
# print "\tAdding:", key
rRef = pybonjour.DNSServiceAddRecord(sdRef = self.sdRefServer,
rrtype = pybonjour.kDNSServiceType_TXT,
rdata = newTxtRecords[key]['txtRecord'])
# Store the new TXT record.
self.txtRecords[key] = newTxtRecords[key]
# Also store the record reference.
self.txtRecords[key]['recordReference'] = rRef
def _commitSuicide(self):
"""Commit suicide when asked to. The lock must be acquired before
calling this method.
"""
# Close the service descriptor references.
if self.sdRefServer is not None:
self.sdRefServer.close()
# We've now unregistered our service, thus call the
# serviceUnregisteredCallback callback.
self.serviceUnregisteredCallback(self.serviceName, self.serviceType, self.port)
if self.sdRefBrowse is not None:
self.sdRefBrowse.close()
for sdRef in self.sdRefSingleShots:
sdRef.close()
for sdRef in self.sdRefTXTRecordQueries:
sdRef.close()
# Stop us from running any further.
self.alive = False
if __name__ == "__main__":
import time
import platform
import copy
from optparse import OptionParser
def serviceRegisteredCallback(sdRef, flags, errorCode, name, regtype, domain, port):
print "SERVICE REGISTERED CALLBACK FIRED, params: sdRef=%d, flags=%d, errorCode=%d, name=%s, regtype=%s, domain=%s, port=%d" % (sdRef.fileno(), flags, errorCode, name, regtype, domain, port)
def serviceRegistrationFailedCallback(sdRef, flags, errorCode, errorMessage, name, regtype, domain):
print "SERVICE REGISTRATION FAILED CALLBACK FIRED, params: sdRef=%d, flags=%d, errorCode=%d, errorMessage=%s, name=%s, regtype=%s, domain=%s" % (sdRef, flags, errorCode, errorMessage, name, regtype, domain)
def serviceUnregisteredCallback(serviceName, serviceType, port):
print "SERVICE UNREGISTERED CALLBACK FIRED, params: serviceName=%s, serviceType=%s, port=%d" % (serviceName, serviceType, port)
def peerServiceDiscoveryCallback(serviceName, interfaceIndex, fullname, hosttarget, ip, port):
print "SERVICE DISCOVERY CALLBACK FIRED, params: serviceName=%s, interfaceIndex=%d, fullname=%s, hosttarget=%s, ip=%s, port=%d" % (serviceName, interfaceIndex, fullname, hosttarget, ip, port)
def peerServiceRemovalCallback(serviceName, interfaceIndex):
print "SERVICE REMOVAL CALLBACK FIRED, params: serviceName=%s, interfaceIndex=%d" % (serviceName, interfaceIndex)
def peerServiceUpdateCallback(serviceName, interfaceIndex, fullname, hosttarget, ip, port):
print "SERVICE UPDATE CALLBACK FIRED, params: serviceName=%s, interfaceIndex=%d, fullname=%s, hosttarget=%s, ip=%s, port=%d" % (serviceName, interfaceIndex, fullname, hosttarget, ip, port)
def peerServiceDescriptionUpdatedCallback(serviceName, interfaceIndex, txtRecords, updated, deleted):
print "SERVICE DESCRIPTION UPDATED CALLBACK FIRED", serviceName, interfaceIndex, txtRecords
print "\tupdated:"
for key in updated:
print "\t\t", key, txtRecords[key]
print "\tdeleted:"
for key in deleted:
print "\t\t", key
parser = OptionParser()
parser.add_option("-l", "--listenOnly", action="store_true", dest="listenOnly",
help="listen only (don't broadcast)")
(options, args) = parser.parse_args()
# Initialize ZeroconfMessaging.
z = ZeroconfMessaging(serviceName = platform.node(),
serviceType = '_manyinarow._tcp',
port = None,
protocolVersion = 1,
serviceRegisteredCallback=serviceRegisteredCallback,
serviceRegistrationFailedCallback=serviceRegistrationFailedCallback,
serviceUnregisteredCallback=serviceUnregisteredCallback,
peerServiceDiscoveryCallback=peerServiceDiscoveryCallback,
peerServiceRemovalCallback=peerServiceRemovalCallback,
peerServiceUpdateCallback=peerServiceUpdateCallback,
peerServiceDescriptionUpdatedCallback=peerServiceDescriptionUpdatedCallback)
# Prepare two messages to be broadcast.
message = {
'status' : 'playing',
'playerName' : 'Wim',
'hostedGame' : "Wim's game",
'participatedGame' : "Wim's game",
'players' : 3,
'timePlaying' : 123,
'newMoves' : [(3245, 'Brecht', 2), (3246, 'Kristof', 3)],
}
messageTwo = copy.deepcopy(message)
messageTwo['players'] = 2
del messageTwo['newMoves']
# Sample usage.
z.start()
if not options.listenOnly:
# Put messages in the outbox.
time.sleep(5)
with z.lock:
z.outbox.put(message)
time.sleep(2)
with z.lock:
z.outbox.put(messageTwo)
time.sleep(10)
else:
# Get messages from the inbox.
with z.lock:
endTime = time.time() + 15
# Keep fetching messages until the end time has been reached.
while time.time() < endTime:
# Wait for a next message.
while z.inbox.qsize() == 0 and time.time() < endTime:
z.lock.wait(1) # Timeout after 1 second of waiting.
if z.inbox.qsize() > 0:
message = z.inbox.get()
print "MESSAGE IN INBOX", message
z.kill()
|
|
import fs.utils
import fs.path
import tempfile
import shutil
import time
from fs.osfs import OSFS
from fs.multifs import MultiFS
from fs.errors import (ResourceLockedError)
from contextlib import contextmanager
# HELPER FUNCTIONS
def _close(filesys):
    error = None
    for _ in range(5):
        try:
            filesys.close()
            return
        except ResourceLockedError as e:
            error = e
            time.sleep(0.5)
    # Still locked after five attempts: re-raise the last error. Storing the
    # exception avoids referencing `e` outside the except block, which raises
    # a NameError on Python 3.
    raise error
def _makedirs(filesystem, path):
filesystem.makedir(path, recursive=True, allow_recreate=True)
def _touch(filesystem, path):
_makedirs(filesystem, fs.path.dirname(path))
if not filesystem.isfile(path):
filesystem.createfile(path)
# HELPER CONTEXT MANAGERS
@contextmanager
def _choose_read_fs(authority, cache, read_path, version_check, hasher):
'''
Context manager returning the appropriate up-to-date readable filesystem
    Use ``cache`` if it is a valid filesystem and has a file at
``read_path``, otherwise use ``authority``. If the file at
``read_path`` is out of date, update the file in ``cache`` before
returning it.
'''
if cache and cache.fs.isfile(read_path):
if version_check(hasher(cache.fs.open(read_path, 'rb'))):
yield cache.fs
elif authority.fs.isfile(read_path):
fs.utils.copyfile(
authority.fs,
read_path,
cache.fs,
read_path)
yield cache.fs
else:
_makedirs(authority.fs, fs.path.dirname(read_path))
_makedirs(cache.fs, fs.path.dirname(read_path))
yield cache.fs
else:
if not authority.fs.isfile(read_path):
_makedirs(authority.fs, fs.path.dirname(read_path))
yield authority.fs
@contextmanager
def _get_write_fs():
'''
Context manager returning a writable filesystem
Use a temporary directory and clean on exit.
.. todo::
Evaluate options for using a cached memoryFS or streaming object
instead of an OSFS(tmp). This could offer significant performance
improvements. Writing to the cache is less of a problem since this
would be done in any case, though performance could be improved by
writing to an in-memory filesystem and then writing to both cache and
auth.
'''
tmp = tempfile.mkdtemp()
try:
# Create a writeFS and path to the directory containing the archive
write_fs = OSFS(tmp)
try:
yield write_fs
finally:
_close(write_fs)
finally:
shutil.rmtree(tmp)
@contextmanager
def _prepare_write_fs(read_fs, cache, read_path, readwrite_mode=True):
'''
Prepare a temporary filesystem for writing to read_path
The file will be moved to write_path on close if modified.
'''
with _get_write_fs() as write_fs:
# If opening in read/write or append mode, make sure file data is
# accessible
if readwrite_mode:
if not write_fs.isfile(read_path):
_touch(write_fs, read_path)
if read_fs.isfile(read_path):
fs.utils.copyfile(
read_fs, read_path, write_fs, read_path)
else:
_touch(write_fs, read_path)
yield write_fs
# AVAILABLE I/O CONTEXT MANAGERS
@contextmanager
def open_file(
authority,
cache,
update,
version_check,
hasher,
read_path,
write_path=None,
cache_on_write=False,
mode='r',
*args,
**kwargs):
'''
Context manager for reading/writing an archive and uploading on changes
Parameters
----------
authority : object
:py:mod:`pyFilesystem` filesystem object to use as the authoritative,
up-to-date source for the archive
cache : object
:py:mod:`pyFilesystem` filesystem object to use as the cache. Default
``None``.
use_cache : bool
update, service_path, version_check, \*\*kwargs
'''
if write_path is None:
write_path = read_path
with _choose_read_fs(
authority, cache, read_path, version_check, hasher) as read_fs:
write_mode = ('w' in mode) or ('a' in mode) or ('+' in mode)
if write_mode:
readwrite_mode = (
('a' in mode) or (
('r' in mode) and (
'+' in mode)))
with _prepare_write_fs(
read_fs, cache, read_path, readwrite_mode) as write_fs:
wrapper = MultiFS()
wrapper.addfs('reader', read_fs)
wrapper.setwritefs(write_fs)
with wrapper.open(read_path, mode, *args, **kwargs) as f:
yield f
info = write_fs.getinfokeys(read_path, 'size')
if 'size' in info:
if info['size'] == 0:
return
with write_fs.open(read_path, 'rb') as f:
checksum = hasher(f)
if not version_check(checksum):
if (
cache_on_write or
(
cache
and (
fs.path.abspath(read_path) ==
fs.path.abspath(write_path))
and cache.fs.isfile(read_path)
)
):
_makedirs(cache.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
write_fs, read_path, cache.fs, write_path)
_makedirs(authority.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
cache.fs, write_path, authority.fs, write_path)
else:
_makedirs(authority.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
write_fs, read_path, authority.fs, write_path)
update(**checksum)
else:
with read_fs.open(read_path, mode, *args, **kwargs) as f:
yield f
@contextmanager
def get_local_path(
authority,
cache,
update,
version_check,
hasher,
read_path,
write_path=None,
cache_on_write=False):
'''
Context manager for retrieving a system path for I/O and updating on change
Parameters
----------
authority : object
:py:mod:`pyFilesystem` filesystem object to use as the authoritative,
up-to-date source for the archive
cache : object
:py:mod:`pyFilesystem` filesystem object to use as the cache. Default
``None``.
use_cache : bool
update, service_path, version_check, \*\*kwargs
'''
if write_path is None:
write_path = read_path
with _choose_read_fs(
authority, cache, read_path, version_check, hasher) as read_fs:
with _prepare_write_fs(
read_fs, cache, read_path, readwrite_mode=True) as write_fs:
yield write_fs.getsyspath(read_path)
if write_fs.isfile(read_path):
info = write_fs.getinfokeys(read_path, 'size')
if 'size' in info:
if info['size'] == 0:
return
with write_fs.open(read_path, 'rb') as f:
checksum = hasher(f)
if not version_check(checksum):
if (
cache_on_write or
(
cache
and (
fs.path.abspath(read_path) ==
fs.path.abspath(write_path))
and cache.fs.isfile(read_path)
)
):
_makedirs(cache.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
write_fs, read_path, cache.fs, write_path)
_makedirs(authority.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
cache.fs, write_path, authority.fs, write_path)
else:
_makedirs(authority.fs, fs.path.dirname(write_path))
fs.utils.copyfile(
write_fs, read_path, authority.fs, write_path)
update(**checksum)
else:
raise OSError(
'Local file removed during execution. '
'Archive not updated.')
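
# --- Illustrative usage sketch (not part of this module) ----------------------
# `authority` and `cache` are expected to expose the underlying pyFilesystem
# object as ``.fs``; `update`, `version_check` and `hasher` are callables
# supplied by the caller. All of the names below are hypothetical stand-ins:
#
#     with open_file(authority, cache, update, version_check, hasher,
#                    'archives/data.csv', mode='r') as f:
#         print(f.read())
#
#     with get_local_path(authority, cache, update, version_check, hasher,
#                         'archives/data.csv') as local_path:
#         run_external_tool(local_path)   # any tool that needs a real path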
|
|
import unittest
from random import shuffle
from sqlalchemy.orm import defaultload, selectinload
from . import models
from .util import QueryLogger, TestQueryStringsMixin
from .saversion import SA_SINCE, SA_UNTIL
from mongosql import selectinquery
# Detect SqlAlchemy version
# We need to differentiate, because:
# in 1.2.x, selectinload() builds a JOIN query from the left entity to the right entity
# in 1.3.x, selectinload() queries just the right entity, and filters by the foreign key field directly
from mongosql import SA_12, SA_13
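# For reference, version flags like these can be derived from the installed
# SqlAlchemy directly (a sketch; assumes only the major.minor prefix matters):
#
#     import sqlalchemy
#     SA_12 = sqlalchemy.__version__.startswith('1.2')
#     SA_13 = sqlalchemy.__version__.startswith('1.3')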
class SelectInQueryLoadTest(unittest.TestCase, TestQueryStringsMixin):
@classmethod
def setUpClass(cls):
cls.engine, cls.Session = models.get_working_db_for_tests()
cls.ssn = cls.Session() # let every test reuse the same session; expect some interference issues
def test_filter(self):
""" selectinquery() + filter """
engine, ssn = self.engine, self.ssn
# Test: load a relationship, filtered
with QueryLogger(engine) as ql:
q = ssn.query(models.User).options(selectinquery(
models.User.articles,
lambda q, **kw: q.filter(models.Article.id.between(11,21))
))
res = q.all()
# Test query
if SA_12:
# SqlAlchemy 1.2.x used to make a JOIN
self.assertQuery(ql[1],
'FROM u AS u_1 JOIN a ON u_1.id = a.uid',
'WHERE u_1.id IN (1, 2, 3) AND '
'a.id BETWEEN 11 AND 21 '
'ORDER BY u_1.id',
)
else:
# SqlAlchemy 1.3.x uses foreign keys directly, no joins
            self.assertNotIn('JOIN', ql[1])
self.assertQuery(ql[1],
'WHERE a.uid IN (1, 2, 3) AND ',
'a.id BETWEEN 11 AND 21 ',
# v1.3.16: no ordering by PK anymore
'ORDER BY a.uid' if SA_UNTIL('1.3.15') else '',
)
# Test results
self.assert_users_articles_comments(res, 3, 4, None) # 3 users, 4 articles in total
def test_plain_old_selectinload(self):
""" Test plain selectinload() """
engine, ssn = self.engine, self.ssn
with QueryLogger(self.engine) as ql:
q = ssn.query(models.User).options(selectinload(models.User.articles))
res = q.all()
# Test query
if SA_12:
self.assertQuery(ql[1],
'WHERE u_1.id IN (1, 2, 3)',
# v1.3.16: no ordering by PK anymore
'ORDER BY u_1.id' if SA_UNTIL('1.3.15') else '',
)
else:
self.assertQuery(ql[1],
'WHERE a.uid IN (1, 2, 3)',
# v1.3.16: no ordering by PK anymore
'ORDER BY a.uid' if SA_UNTIL('1.3.15') else '',
)
# Test results
self.assert_users_articles_comments(res, 3, 6, None) # 3 users, 6 articles in total
def test_options(self):
""" selectinquery() + options(load_only()) + limit """
engine, ssn = self.engine, self.ssn
with QueryLogger(engine) as ql:
q = ssn.query(models.User).options(selectinquery(
models.User.articles,
# Notice how we still have to apply the options using the relationship!
lambda q, **kw: q.options(defaultload(models.User.articles)
.load_only(models.Article.title)).limit(1)
))
res = q.all()
# Test query
self.assertQuery(ql[1], 'LIMIT 1')
if SA_12:
self.assertSelectedColumns(ql[1], 'a.id', 'u_1.id', 'a.title') # PK, FK, load_only()
else:
self.assertSelectedColumns(ql[1], 'a.id', 'a.uid', 'a.title') # PK, FK, load_only()
# Test results
self.assert_users_articles_comments(res, 3, 1, None) # 3 users, 1 article in total ; just one, because of the limit
def test_options_joinedload(self):
""" selectinquery() + options(joinedload()) """
engine, ssn = self.engine, self.ssn
with QueryLogger(engine) as ql:
q = ssn.query(models.User).options(selectinquery(
models.User.articles,
lambda q, **kw: q.options(defaultload(models.User.articles)
.joinedload(models.Article.comments))
))
res = q.all()
# Test query
self.assertQuery(ql[1], 'LEFT OUTER JOIN c AS c_1 ON a.id = c_1.aid')
# Test results
self.assert_users_articles_comments(res, 3, 6, 9) # 3 users, 6 articles, 9 comments
def test_options_selectinload(self):
""" selectinquery() + options(selectinload()) """
engine, ssn = self.engine, self.ssn
with QueryLogger(engine) as ql:
q = ssn.query(models.User).options(selectinquery(
models.User.articles,
lambda q, **kw: q.options(defaultload(models.User.articles)
.selectinload(models.Article.comments))
))
res = q.all()
# Test second query
if SA_12:
self.assertQuery(ql[2], 'JOIN c')
else:
self.assertQuery(ql[2], 'FROM c')
# Test results
self.assert_users_articles_comments(res, 3, 6, 9) # 3 users, 6 articles, 9 comments
def test_options_selectinquery(self):
""" selectinquery() + load_only() + options(selectinquery() + load_only()) """
engine, ssn = self.engine, self.ssn
with QueryLogger(engine) as ql:
q = ssn.query(models.User).options(selectinquery(
models.User.articles,
lambda q, **kw: q
.filter(models.Article.id > 10) # first level filter()
.options(defaultload(models.User.articles)
.load_only(models.Article.title) # first level options()
.selectinquery(models.Article.comments,
lambda q, **kw:
q
.filter(models.Comment.uid > 1) # second level filter()
.options(
defaultload(models.User.articles)
.defaultload(models.Article.comments)
.load_only(models.Comment.text) # second level options()
)))
))
res = q.all()
# Test query
self.assertQuery(ql[1], 'AND a.id > 10')
if SA_12:
self.assertSelectedColumns(ql[1], 'a.id', 'u_1.id', 'a.title') # PK, FK, load_only()
else:
self.assertSelectedColumns(ql[1], 'a.id', 'a.uid', 'a.title') # PK, FK, load_only()
# Test second query
self.assertQuery(ql[2], 'AND c.uid > 1')
if SA_12:
self.assertSelectedColumns(ql[2], 'c.id', 'a_1.id', 'c.text') # PK, FK, load_only()
else:
self.assertSelectedColumns(ql[2], 'c.id', 'c.aid', 'c.text') # PK, FK, load_only()
# Test results
self.assert_users_articles_comments(res, 3, 5, 1) # 3 users, 5 articles, 1 comment
# Re-run all tests in wild combinations
def test_all_tests_interference(self):
""" Repeat all tests by randomly mixing them and running them in different order
to make sure that they do not interfere with each other """
        # Collect the tests into a list (not a one-shot generator) so the
        # collection can be reused on every randomized run below.
        all_tests = [getattr(self, name)
                     for name in dir(self)
                     if name.startswith('test_')
                     and name != 'test_all_tests_interference']
for i in range(20):
# Make a randomized mix of all tests
tests = list(all_tests)
shuffle(tests)
# Run them all
print('='*20 + ' Random run #{}'.format(i))
for t in tests:
try:
# Repeat every test several times
for n in range(3):
t()
except unittest.SkipTest: pass # proceed
def assert_users_articles_comments(self, users, n_users, n_articles=None, n_comments=None):
self.assertEqual(len(users), n_users)
if n_articles is not None:
self.assertEqual(sum(len(u.articles) for u in users), n_articles)
if n_comments is not None:
self.assertEqual(sum(sum(len(a.comments) for a in u.articles) for u in users), n_comments)
|
|
"""receiver.
This module defines the receiving part for alarmRetarder.
"""
import threading
import logging
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import ntfrcv
from .entities import Alert
class Receiver(threading.Thread):
""" An alarm receiver.
This is an abstract class for an alarm receiver.
    A concrete implementation receives an alarm and schedules an alert
using the scheduler.
Attributes:
config: an object for getting or setting configuration values
scheduler: an object for scheduling alerts
run_event: a threading.Event for checking the shutdown state
of the application.
"""
def __init__(self, config, scheduler, run_event):
"""inits the Receiver class."""
threading.Thread.__init__(self)
self._scheduler = scheduler
self._config = config
self._run_event = run_event
self._logger = logging.getLogger("receiver")
def schedule_alert(self, alert_id, alert_type, alert_key, alert_severity, alert_logmessage):
"""Schedules an alert after receiving an alarm.
Creates an Alert object with the given arguments and sending
it to the Scheduler.
Args:
alert_id: alarm ID
alert_type: an integer. 1 = PROBLEM, 2 = RESOLUTION
alert_key: alarm key
alert_severity: alarm severity
alert_logmessage: alarm message
Returns:
None
"""
alert = Alert(alert_id, alert_type, alert_key, alert_severity, alert_logmessage)
self._scheduler.add_alert(alert)
def set_config_option(self, section_name, key, value):
"""Sets a configuration option.
This function sets an option in the configuration or changes
        an existing configuration entry.
Args:
section_name: name of the config section
key: key of the configuration option
value: new value of the configuration option
Returns:
None
"""
# get old value and change config
old_value = self._config.get_value(section_name, key, "")
if old_value != value:
self._config.set_value(section_name, key, value)
# send ConfigChangedAlert
self._scheduler.add_config_changed_alert(section_name, key, old_value, value)
def run(self):
"""Receive function.
        This is the function which has to implement the receiving of
        new alarms. It will be executed as its own thread. The
        implementation should watch run_event, which is cleared when
        the application shuts down. When an alarm is received, either
        schedule_alert(...) or set_config_option(...) should be
        executed.
Args:
Returns:
None
"""
        raise NotImplementedError()
class SnmpTrapReceiver(Receiver):
"""Receives alarms by SNMP trap.
This class is an implementation of the abstract Receiver class and
    receives alarms by SNMP traps. It reads the following parameters
from the configuration section "SnmpTrapReceiver".
Configuration:
listenaddress: interface to listen for SNMP traps.
listenport: port to listen for SNMP traps.
community: SNMP community for the trap receiver.
"""
def run(self):
"""Receives SNMP traps.
        Starts receiving SNMP traps in a new thread. If a received
        trap is an alertTrap, the alert will be scheduled. If
the received trap is a configTrap, the configuration option
will be changed. All other traps will be ignored.
"""
def check_shutdown(time_now):
"""Callback function: shutdown handler.
This function will be used by pySNMP as callback function
            to check whether the application is shutting down.
Args:
                time_now: the current time (not used in this case).
Returns:
None.
"""
if self._run_event.is_set() is False:
snmp_engine.transportDispatcher.jobFinished(1)
def trap_received(snmp_engine, state_reference, context_engine_id,
context_name, var_binds, cb_ctx):
"""Callback function: receiving a trap.
This is a callback function used by pySNMP and executed
            each time an SNMP trap is received. It checks the type
of the trap and schedules an alert or sets a configuration
option.
Args:
snmp_engine: pySNMP snmpEngine.
state_reference: pySNMP stateReference.
context_engine_id: pySNMP contextEngineId.
context_name: pySNMP contextName.
var_binds: pySNMP varBinds.
                cb_ctx: pySNMP cbCtx.
"""
alert_id = ""
alert_type = ""
alert_key = ""
alert_severity = ""
alert_logmessage = ""
config_section = ""
config_key = ""
config_value = ""
trap_oid = ""
for var_bind in var_binds:
var_bind_name = str(var_bind[0])
var_bind_value = str(var_bind[1])
if var_bind_name == "1.3.6.1.6.3.1.1.4.1.0":
trap_oid = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.1":
alert_id = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.2":
alert_type = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.3":
alert_key = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.4":
alert_severity = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.5":
alert_logmessage = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.10":
config_section = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.11":
config_key = var_bind_value
elif var_bind_name == "1.3.6.1.4.1.99999.3.12":
config_value = var_bind_value
if trap_oid == "1.3.6.1.4.1.99999.3.0.1":
self._logger.info("alert trap received: id=%s, type=%s, key=%s, "
"severity=%s, logmsg=%s", alert_id, alert_type, alert_key,
alert_severity, alert_logmessage)
self.schedule_alert(alert_id, alert_type, alert_key,
alert_severity, alert_logmessage)
elif trap_oid == "1.3.6.1.4.1.99999.3.0.2":
self._logger.info("config trap received: section=%s, key=%s, value=%s",
config_section, config_key, config_value)
self.set_config_option(config_section, config_key, config_value)
else:
                self._logger.warning("trap with no matching configuration received")
# get configuration
config_listen_address = self._config.get_value("SnmpTrapReceiver", "listenaddress",
"127.0.0.1")
config_listen_port = int(self._config.get_value("SnmpTrapReceiver", "listenport", "162"))
config_community = self._config.get_value("SnmpTrapReceiver", "community", "public")
# create and configure SNMP engine
snmp_engine = engine.SnmpEngine()
config.addTransport(snmp_engine,
udp.domainName + (1,),
udp.UdpTransport().openServerMode((config_listen_address,
config_listen_port)))
config.addV1System(snmp_engine, 'my-area', config_community)
# register callback function
ntfrcv.NotificationReceiver(snmp_engine, trap_received)
# register timer callback function
snmp_engine.transportDispatcher.registerTimerCbFun(check_shutdown)
# start dispatcher
snmp_engine.transportDispatcher.jobStarted(1)
try:
snmp_engine.transportDispatcher.runDispatcher()
except:
snmp_engine.transportDispatcher.closeDispatcher()
raise
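
# --- Illustrative example (not part of this module) ---------------------------
# Assuming net-snmp's `snmptrap` command-line tool, an alert trap matching the
# OIDs handled above could be sent to this receiver with something like the
# following; the values are hypothetical:
#
#     snmptrap -v 2c -c public 127.0.0.1:162 '' 1.3.6.1.4.1.99999.3.0.1 \
#         1.3.6.1.4.1.99999.3.1 s "4711" \
#         1.3.6.1.4.1.99999.3.2 s "1" \
#         1.3.6.1.4.1.99999.3.3 s "host01/ping" \
#         1.3.6.1.4.1.99999.3.4 s "critical" \
#         1.3.6.1.4.1.99999.3.5 s "host01 is unreachable"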
|
|
# -*- coding: utf-8 -*-
"""The artifact definitions registry."""
from artifacts import definitions
from artifacts import errors
from artifacts import source_type
class ArtifactDefinitionsRegistry(object):
"""Artifact definitions registry."""
_source_type_classes = {
definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
source_type.ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_COMMAND: source_type.CommandSourceType,
definitions.TYPE_INDICATOR_DIRECTORY: source_type.DirectorySourceType,
definitions.TYPE_INDICATOR_FILE: source_type.FileSourceType,
definitions.TYPE_INDICATOR_PATH: source_type.PathSourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
source_type.WindowsRegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
source_type.WindowsRegistryValueSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: source_type.WMIQuerySourceType,
}
def __init__(self):
"""Initializes an artifact definitions registry."""
super(ArtifactDefinitionsRegistry, self).__init__()
self._artifact_definitions_by_alias = {}
self._artifact_definitions_by_name = {}
self._artifact_name_references = set()
self._defined_artifact_names = set()
@classmethod
def CreateSourceType(cls, type_indicator, attributes):
"""Creates a source type object.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
"""
if type_indicator not in cls._source_type_classes:
raise errors.FormatError(
'Unsupported type indicator: {0:s}.'.format(type_indicator))
return cls._source_type_classes[type_indicator](**attributes)
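  # Example (illustrative; the attribute names are assumptions here and must
  # match the keyword arguments of the chosen source type class):
  #
  #     source = ArtifactDefinitionsRegistry.CreateSourceType(
  #         definitions.TYPE_INDICATOR_COMMAND, {'cmd': 'ls', 'args': ['-la']})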
def DeregisterDefinition(self, artifact_definition):
"""Deregisters an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if an artifact definition is not set for the corresponding name.
"""
artifact_definition_name = artifact_definition.name.lower()
if artifact_definition_name not in self._artifact_definitions_by_name:
raise KeyError(
'Artifact definition not set for name: {0:s}.'.format(
artifact_definition.name))
for alias in artifact_definition.aliases:
if alias.lower() not in self._artifact_definitions_by_alias:
raise KeyError(
'Artifact definition not set for alias: {0:s}.'.format(alias))
del self._artifact_definitions_by_name[artifact_definition_name]
for alias in artifact_definition.aliases:
del self._artifact_definitions_by_alias[alias.lower()]
@classmethod
def DeregisterSourceType(cls, source_type_class):
"""Deregisters a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
KeyError: if a source type is not set for the corresponding type
indicator.
"""
if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
raise KeyError(
'Source type not set for type: {0:s}.'.format(
source_type_class.TYPE_INDICATOR))
del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
def GetDefinitionByAlias(self, alias):
"""Retrieves a specific artifact definition by alias.
Args:
alias (str): alias of the artifact definition.
Returns:
ArtifactDefinition: an artifact definition or None if not available.
"""
if not alias:
return None
return self._artifact_definitions_by_alias.get(alias.lower(), None)
def GetDefinitionByName(self, name):
"""Retrieves a specific artifact definition by name.
Args:
name (str): name of the artifact definition.
Returns:
ArtifactDefinition: an artifact definition or None if not available.
"""
if not name:
return None
return self._artifact_definitions_by_name.get(name.lower(), None)
def GetDefinitions(self):
"""Retrieves the artifact definitions.
Returns:
list[ArtifactDefinition]: artifact definitions.
"""
return self._artifact_definitions_by_name.values()
def GetUndefinedArtifacts(self):
"""Retrieves the names of undefined artifacts used by artifact groups.
Returns:
set[str]: undefined artifacts names.
"""
return self._artifact_name_references - self._defined_artifact_names
def RegisterDefinition(self, artifact_definition):
"""Registers an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if artifact definition is already set for the corresponding
name or alias.
"""
artifact_definition_name = artifact_definition.name.lower()
if artifact_definition_name in self._artifact_definitions_by_name:
raise KeyError(
'Artifact definition already set for name: {0:s}.'.format(
artifact_definition.name))
for alias in artifact_definition.aliases:
alias_lower = alias.lower()
if alias_lower in self._artifact_definitions_by_alias:
raise KeyError(
'Artifact definition already set for alias: {0:s}.'.format(alias))
if alias_lower in self._artifact_definitions_by_name:
raise KeyError(
            'Artifact definition alias: {0:s} already used as name.'.format(
                alias))
self._artifact_definitions_by_name[artifact_definition_name] = (
artifact_definition)
self._defined_artifact_names.add(artifact_definition.name)
for alias in artifact_definition.aliases:
self._artifact_definitions_by_alias[alias.lower()] = artifact_definition
for source in artifact_definition.sources:
if source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
self._artifact_name_references.update(source.names)
@classmethod
def RegisterSourceType(cls, source_type_class):
"""Registers a source type.
Source types are identified based on their type indicator.
Args:
source_type_class (type): source type.
Raises:
KeyError: if source types is already set for the corresponding
type indicator.
"""
if source_type_class.TYPE_INDICATOR in cls._source_type_classes:
raise KeyError(
'Source type already set for type: {0:s}.'.format(
source_type_class.TYPE_INDICATOR))
cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (
source_type_class)
@classmethod
def RegisterSourceTypes(cls, source_type_classes):
"""Registers source types.
Source types are identified based on their type indicator.
Args:
source_type_classes (list[type]): source types.
"""
for source_type_class in source_type_classes:
cls.RegisterSourceType(source_type_class)
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):
"""Reads artifact definitions into the registry from files in a directory.
    This function does not recurse into sub directories.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
path (str): path of the directory to read from.
extension (Optional[str]): extension of the filenames to read.
Raises:
KeyError: if a duplicate artifact definition is encountered.
"""
for artifact_definition in artifacts_reader.ReadDirectory(
path, extension=extension):
self.RegisterDefinition(artifact_definition)
def ReadFromFile(self, artifacts_reader, filename):
"""Reads artifact definitions into the registry from a file.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
filename (str): name of the file to read from.
"""
for artifact_definition in artifacts_reader.ReadFile(filename):
self.RegisterDefinition(artifact_definition)
def ReadFileObject(self, artifacts_reader, file_object):
"""Reads artifact definitions into the registry from a file-like object.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
file_object (file): file-like object to read from.
"""
for artifact_definition in artifacts_reader.ReadFileObject(file_object):
self.RegisterDefinition(artifact_definition)
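# Illustrative usage sketch (not part of the original module): load artifact
# definitions through a reader, resolve one definition by name or alias, and
# report artifact group references that were never defined. The registry class
# name, the artifact name and the file path passed in are assumptions made for
# illustration only; the method calls themselves come from the class above.
def _ExampleRegistryUsage(artifacts_reader, path):
  """Sketch only; see the assumptions noted in the comment above."""
  registry = ArtifactDefinitionsRegistry()  # assumed name of the class above
  registry.ReadFromFile(artifacts_reader, path)
  definition = (registry.GetDefinitionByName('WindowsEventLogs') or
                registry.GetDefinitionByAlias('WindowsEventLogs'))
  undefined_names = registry.GetUndefinedArtifacts()
  return definition, undefined_names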
|
|
""" test with the .transform """
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range
from pandas.core.dtypes.common import (
ensure_platform_int, is_timedelta64_dtype)
from pandas.compat import StringIO
from pandas._libs import groupby
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby.groupby import DataError
from pandas.core.config import option_context
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def test_transform():
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.Grouper(freq='M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast():
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index, name='val')
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
# GH 12737
df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
'd': pd.date_range('2014-1-1', '2014-1-4'),
'i': [1, 2, 3, 4]},
columns=['grouping', 'f', 'i', 'd'])
result = df.groupby('grouping').transform('first')
dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
'd': dates,
'i': [1, 2, 2, 4]},
columns=['f', 'i', 'd'])
assert_frame_equal(result, expected)
# selection
result = df.groupby('grouping')[['f', 'i']].transform('first')
expected = expected[['f', 'i']]
assert_frame_equal(result, expected)
# dup columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
result = df.groupby('g').transform('first')
expected = df.drop('g', axis=1)
assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
tm.assert_index_equal(result.columns, tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(tsframe):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype():
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug():
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
expected = pd.Series([True, True], name='A')
df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = pd.Series([
Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
# this does date math without changing result type in transform
base_time = df['A'][0]
result = df.groupby('A')['A'].transform(
lambda x: x.max() - x.min() + base_time) - base_time
assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
# GH 10972
# convert dt to float
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
expected = Series([-0.5, 0.5], name='b')
assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
expected = Series([0, 1], name='b')
assert_series_equal(result, expected)
def test_transform_casting():
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(StringIO(data), sep=r'\s+',
index_col=[0], parse_dates=['DATETIME'])
result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
df = tsframe[::5].reindex(tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby('A')['C', 'D'].transform(f)
selection = df[['C', 'D']]
expected = selection.groupby(df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
result = df.groupby('A').transform('mean')
expected = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = df.groupby('A')['C'].transform('mean')
expected = df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_series_fast_transform_date():
# GH 13191
df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
'd': pd.date_range('2014-1-1', '2014-1-4')})
result = df.groupby('grouping')['d'].transform('first')
dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-4')]
expected = pd.Series(dates, name='d')
assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
g = df.groupby('A')
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
# GH 9941
df = pd.DataFrame({'a': range(10),
'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)['a'].transform(max)
expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
name='a')
assert_series_equal(result, expected)
def test_transform_mixed_type():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
assert result['d'].dtype == np.float64
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.loc[key])
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
pd_op : callable
The pandas cumulative function.
np_op : callable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
def test_cython_group_transform_cumsum(any_real_dtype):
# see gh-4095
dtype = np.dtype(any_real_dtype).type
pd_op, np_op = groupby.group_cumsum, np.cumsum
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
pd_op, np_op = groupby.group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_series(op, args, targop):
# GH 4095
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(
expected,
data.groupby(labels).transform(op, *args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize('input, exp', [
# When everything is NaN
({'key': ['b'] * 10, 'value': np.nan},
pd.Series([np.nan] * 10, name='value')),
# When there is a single NaN
({'key': ['b'] * 10 + ['a'] * 2,
'value': [3] * 3 + [np.nan] + [3] * 8},
{('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
2187., 6561., 19683., 3.0, 9.0],
('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
21., 24., 27., 3.0, 6.0]})])
def test_groupby_cum_skipna(op, skipna, input, exp):
df = pd.DataFrame(input)
result = df.groupby('key')['value'].transform(op, skipna=skipna)
if isinstance(exp, dict):
expected = exp[(op, skipna)]
else:
expected = exp
expected = pd.Series(expected, name='value')
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_frame(op, args, targop):
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50},
columns=['float', 'float_missing', 'int', 'datetime',
'timedelta', 'string', 'string_missing'])
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
              # a bit of a hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_group_selection()
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(
expected,
getattr(gb, op)(*args).sort_index(axis=1))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
pytest.raises(DataError, gb[c].transform, op)
pytest.raises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
tm.assert_raises_regex(ValueError, 'transform must return '
'a scalar value for each '
'group.*',
df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize('cols,exp,comp_func', [
('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
(['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
tm.assert_frame_equal)
])
@pytest.mark.parametrize('agg_func', [
'count', 'rank', 'size'])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
if agg_func == 'size' and isinstance(cols, list):
pytest.xfail("'size' transformation not supported with "
"NDFrameGroupy")
# GH 19200
df = pd.DataFrame(
{'a': pd.date_range('2018-01-01', periods=3),
'b': range(3),
'c': range(7, 10)})
result = df.groupby('b')[cols].transform(agg_func)
if agg_func == 'rank':
exp = exp.astype('float')
comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [
('foo', 'bar'), (1, 2), (1., 2.)])
@pytest.mark.parametrize("fill_method,limit,exp_vals", [
("ffill", None,
[np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
("ffill", 1,
[np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
("bfill", None,
['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
("bfill", 1,
[np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
])
def test_group_fill_methods(mix_groupings, as_series, val1, val2,
fill_method, limit, exp_vals):
vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
_exp_vals = list(exp_vals)
# Overwrite placeholder values
for index, exp_val in enumerate(_exp_vals):
if exp_val == 'val1':
_exp_vals[index] = val1
elif exp_val == 'val2':
_exp_vals[index] = val2
# Need to modify values and expectations depending on the
# Series / DataFrame that we ultimately want to generate
    if mix_groupings:  # ['a', 'b', 'a', 'b', ...]
keys = ['a', 'b'] * len(vals)
def interweave(list_obj):
temp = list()
for x in list_obj:
temp.extend([x, x])
return temp
_exp_vals = interweave(_exp_vals)
vals = interweave(vals)
else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
keys = ['a'] * len(vals) + ['b'] * len(vals)
_exp_vals = _exp_vals * 2
vals = vals * 2
df = DataFrame({'key': keys, 'val': vals})
if as_series:
result = getattr(
df.groupby('key')['val'], fill_method)(limit=limit)
exp = Series(_exp_vals, name='val')
assert_series_equal(result, exp)
else:
result = getattr(df.groupby('key'), fill_method)(limit=limit)
exp = DataFrame({'key': keys, 'val': _exp_vals})
assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
def test_pad_stable_sorting(fill_method):
# GH 21207
x = [0] * 20
y = [np.nan] * 10 + [1] * 10
if fill_method == 'bfill':
y = y[::-1]
df = pd.DataFrame({'x': x, 'y': y})
expected = df.copy()
result = getattr(df.groupby('x'), fill_method)()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
(-1, 'bfill', None), (-1, 'bfill', 1)])
def test_pct_change(test_series, periods, fill_method, limit):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
exp_vals = Series(vals).pct_change(periods=periods,
fill_method=fill_method,
limit=limit).tolist()
df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
'vals': vals * 2})
grp = df.groupby('key')
def get_result(grp_obj):
return grp_obj.pct_change(periods=periods,
fill_method=fill_method,
limit=limit)
if test_series:
exp = pd.Series(exp_vals * 2)
exp.name = 'vals'
grp = grp['vals']
result = get_result(grp)
tm.assert_series_equal(result, exp)
else:
exp = DataFrame({'vals': exp_vals * 2})
result = get_result(grp)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
# GH 20653
df = pd.DataFrame([['foo', True],
[np.nan, True],
['foo', True]], columns=['key', 'val'])
exp = pd.Series([True, np.nan, True], name='val')
res = df.groupby('key')['val'].transform(func)
tm.assert_series_equal(res, exp)
|
|
from __future__ import unicode_literals
from collections import OrderedDict
from distutils.version import LooseVersion
import os
from github3 import login
import hiyapyco
import raven
import cumulusci
from cumulusci.core.config import BaseTaskFlowConfig
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.core.exceptions import KeychainNotFound
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.exceptions import ServiceNotValid
class BaseProjectConfig(BaseTaskFlowConfig):
""" Base class for a project's configuration which extends the global config """
search_path = ['config']
def __init__(self, global_config_obj, config=None):
self.global_config_obj = global_config_obj
self.keychain = None
if not config:
config = {}
super(BaseProjectConfig, self).__init__(config=config)
@property
def config_global_local(self):
return self.global_config_obj.config_global_local
@property
def config_global(self):
return self.global_config_obj.config_global
@property
def repo_info(self):
if hasattr(self, '_repo_info'):
return self._repo_info
# Detect if we are running in a CI environment and get repo info
        # from env vars for the environment instead of .git files
info = {
'ci': None
}
# Make sure that the CUMULUSCI_AUTO_DETECT environment variable is
# set before trying to auto-detect anything from the environment
if not os.environ.get('CUMULUSCI_AUTO_DETECT'):
self._repo_info = info
return self._repo_info
# Heroku CI
heroku_ci = os.environ.get('HEROKU_TEST_RUN_ID')
if heroku_ci:
info = {
'branch': os.environ.get('HEROKU_TEST_RUN_BRANCH'),
'commit': os.environ.get('HEROKU_TEST_RUN_COMMIT_VERSION'),
'ci': 'heroku',
'root': '/app',
}
# Other CI environment implementations can be implemented here...
# Apply CUMULUSCI_REPO_* environment variables last so they can
# override and fill in missing values from the CI environment
repo_branch = os.environ.get('CUMULUSCI_REPO_BRANCH')
if repo_branch:
if repo_branch != info.get('branch'):
self.logger.info(
'CUMULUSCI_REPO_BRANCH found, using its value as the branch'
)
info['branch'] = repo_branch
repo_commit = os.environ.get('CUMULUSCI_REPO_COMMIT')
if repo_commit:
if repo_commit != info.get('commit'):
self.logger.info(
'CUMULUSCI_REPO_COMMIT found, using its value as the commit'
)
info['commit'] = repo_commit
repo_root = os.environ.get('CUMULUSCI_REPO_ROOT')
if repo_root:
if repo_root != info.get('root'):
self.logger.info(
'CUMULUSCI_REPO_ROOT found, using its value as the repo root'
)
info['root'] = repo_root
repo_url = os.environ.get('CUMULUSCI_REPO_URL')
if repo_url:
if repo_url != info.get('url'):
self.logger.info(
'CUMULUSCI_REPO_URL found, using its value as the repo url, owner, and name'
)
url_info = self._split_repo_url(repo_url)
info.update(url_info)
# If running in a CI environment, make sure we have all the needed
# git info or throw a ConfigError
if info['ci']:
validate = OrderedDict((
# <key>, <env var to manually override>
('branch', 'CUMULUSCI_REPO_BRANCH'),
('commit', 'CUMULUSCI_REPO_COMMIT'),
('name', 'CUMULUSCI_REPO_URL'),
('owner', 'CUMULUSCI_REPO_URL'),
('root', 'CUMULUSCI_REPO_ROOT'),
('url', 'CUMULUSCI_REPO_URL'),
))
for key, env_var in list(validate.items()):
if key not in info or not info[key]:
message = 'Detected CI on {} but could not determine the repo {}'.format(
info['ci'],
key,
)
if env_var:
                    message += '. You can manually pass in the {}'.format(key)
message += ' with the {} environment variable.'.format(env_var)
raise ConfigError(message)
# Log any overrides detected through the environment as a warning
if len(info) > 1:
self.logger.info('')
self.logger.warn(
'Using environment variables to override repo info:'
)
keys = list(info.keys())
keys.sort()
for key in keys:
self.logger.warn(
' {}: {}'.format(key, info[key])
)
self.logger.info('')
self._repo_info = info
return self._repo_info
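    # Illustrative sketch (values are placeholders): the environment needed for
    # repo_info to resolve on Heroku CI without a .git checkout. Heroku supplies
    # branch, commit and root ('/app'); CUMULUSCI_REPO_URL fills in url, owner
    # and name so the validation block above passes.
    #
    #   os.environ['CUMULUSCI_AUTO_DETECT'] = '1'
    #   os.environ['HEROKU_TEST_RUN_ID'] = '1234'
    #   os.environ['HEROKU_TEST_RUN_BRANCH'] = 'feature/my-branch'
    #   os.environ['HEROKU_TEST_RUN_COMMIT_VERSION'] = 'abc1234'
    #   os.environ['CUMULUSCI_REPO_URL'] = 'https://github.com/me/MyRepo'
    #   info = project_config.repo_info  # project_config: a BaseProjectConfig
    #   # -> {'ci': 'heroku', 'branch': 'feature/my-branch', 'root': '/app', ...}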
def _split_repo_url(self, url):
url_parts = url.split('/')
name = url_parts[-1]
owner = url_parts[-2]
if name.endswith('.git'):
name = name[:-4]
git_info = {
'url': url,
'owner': owner,
'name': name,
}
return git_info
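    # Worked example (illustrative URL):
    #   _split_repo_url('https://github.com/SomeOwner/SomeRepo.git') returns
    #   {'url': 'https://github.com/SomeOwner/SomeRepo.git',
    #    'owner': 'SomeOwner', 'name': 'SomeRepo'}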
@property
def repo_root(self):
path = self.repo_info.get('root')
if path:
return path
path = os.path.splitdrive(os.getcwd())[1]
while True:
if os.path.isdir(os.path.join(path, '.git')):
return path
head, tail = os.path.split(path)
if not tail:
# reached the root
break
path = head
@property
def repo_name(self):
name = self.repo_info.get('name')
if name:
return name
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
return self._split_repo_url(line)['name']
@property
def repo_url(self):
url = self.repo_info.get('url')
if url:
return url
if not self.repo_root:
return
git_config_file = os.path.join(self.repo_root, '.git', 'config')
with open(git_config_file, 'r') as f:
in_remote_origin = False
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and 'url = ' in line:
return line[7:]
@property
def repo_owner(self):
owner = self.repo_info.get('owner')
if owner:
return owner
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
line_parts = line.split('/')
return line_parts[-2].split(':')[-1]
@property
def repo_branch(self):
branch = self.repo_info.get('branch')
if branch:
return branch
if not self.repo_root:
return
with open(os.path.join(self.repo_root, '.git', 'HEAD'), 'r') as f:
branch_ref = f.read().strip()
if branch_ref.startswith('ref: '):
return '/'.join(branch_ref[5:].split('/')[2:])
@property
def repo_commit(self):
commit = self.repo_info.get('commit')
if commit:
return commit
if not self.repo_root:
return
branch = self.repo_branch
if not branch:
return
join_args = [self.repo_root, '.git', 'refs', 'heads']
join_args.extend(branch.split('/'))
commit_file = os.path.join(*join_args)
commit_sha = None
if os.path.isfile(commit_file):
with open(commit_file, 'r') as f:
commit_sha = f.read().strip()
else:
packed_refs_path = os.path.join(
self.repo_root,
'.git',
'packed-refs'
)
with open(packed_refs_path, 'r') as f:
for line in f:
parts = line.split(' ')
if len(parts) == 1:
# Skip lines showing the commit sha of a tag on the
                        # preceding line
continue
if parts[1].replace('refs/remotes/origin/', '').strip() == branch:
commit_sha = parts[0]
break
return commit_sha
@property
def use_sentry(self):
try:
self.keychain.get_service('sentry')
return True
except ServiceNotConfigured:
return False
except ServiceNotValid:
return False
def init_sentry(self, ):
""" Initializes sentry.io error logging for this session """
if not self.use_sentry:
return
sentry_config = self.keychain.get_service('sentry')
tags = {
'repo': self.repo_name,
'branch': self.repo_branch,
'commit': self.repo_commit,
'cci version': cumulusci.__version__,
}
tags.update(self.config.get('sentry_tags', {}))
env = self.config.get('sentry_environment', 'CumulusCI CLI')
self.sentry = raven.Client(
dsn=sentry_config.dsn,
environment=env,
tags=tags,
processors=(
'raven.processors.SanitizePasswordsProcessor',
),
)
def get_github_api(self):
github_config = self.keychain.get_service('github')
gh = login(github_config.username, github_config.password)
return gh
def get_latest_version(self, beta=False):
""" Query Github Releases to find the latest production or beta release """
gh = self.get_github_api()
repo = gh.repository(self.repo_owner, self.repo_name)
latest_version = None
for release in repo.iter_releases():
if beta != release.tag_name.startswith(self.project__git__prefix_beta):
continue
version = self.get_version_for_tag(release.tag_name)
if version is None:
continue
version = LooseVersion(version)
if not latest_version or version > latest_version:
latest_version = version
return latest_version
@property
def config_project_path(self):
if not self.repo_root:
return
path = os.path.join(self.repo_root, self.config_filename)
if os.path.isfile(path):
return path
@property
def project_local_dir(self):
""" location of the user local directory for the project
e.g., ~/.cumulusci/NPSP-Extension-Test/ """
# depending on where we are in bootstrapping the YamlGlobalConfig
# the canonical projectname could be located in one of two places
if self.project__name:
name = self.project__name
else:
try:
name = self.config_project['project']['name']
except KeyError:
name = ''
path = os.path.join(
os.path.expanduser('~'),
self.global_config_obj.config_local_dir,
name,
)
if not os.path.isdir(path):
os.makedirs(path)
return path
def get_tag_for_version(self, version):
if '(Beta' in version:
tag_version = version.replace(
' (', '-').replace(')', '').replace(' ', '_')
tag_name = self.project__git__prefix_beta + tag_version
else:
tag_name = self.project__git__prefix_release + version
return tag_name
def get_version_for_tag(self, tag, prefix_beta=None, prefix_release=None):
if prefix_beta is None:
prefix_beta = self.project__git__prefix_beta
if prefix_release is None:
prefix_release = self.project__git__prefix_release
if tag.startswith(prefix_beta):
version = tag.replace(prefix_beta, '')
if '-Beta_' in version:
# Beta tags are expected to be like "beta/1.0-Beta_1"
# which is returned as "1.0 (Beta 1)"
return version.replace('-', ' (').replace('_', ' ') + ')'
else:
return
elif tag.startswith(prefix_release):
return tag.replace(prefix_release, '')
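    # Worked examples (illustrative, assuming the common default prefixes
    # 'beta/' and 'release/'):
    #   get_tag_for_version('1.0 (Beta 1)')    -> 'beta/1.0-Beta_1'
    #   get_version_for_tag('beta/1.0-Beta_1') -> '1.0 (Beta 1)'
    #   get_version_for_tag('release/1.0')     -> '1.0'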
def set_keychain(self, keychain):
self.keychain = keychain
def _check_keychain(self):
if not self.keychain:
raise KeychainNotFound(
'Could not find config.keychain. You must call ' +
'config.set_keychain(keychain) before accessing orgs')
def list_orgs(self):
""" Returns a list of all org names for the project """
self._check_keychain()
return self.keychain.list_orgs()
def get_org(self, name):
""" Returns an OrgConfig for the given org_name """
self._check_keychain()
return self.keychain.get_org(name)
def set_org(self, name, org_config):
""" Creates or updates an org's oauth info """
self._check_keychain()
return self.keychain.set_org(name, org_config)
def get_static_dependencies(self, dependencies=None):
""" Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
"""
if not dependencies:
dependencies = self.project__dependencies
if not dependencies:
return
static_dependencies = []
for dependency in dependencies:
if 'github' not in dependency:
static_dependencies.append(dependency)
else:
static = self.process_github_dependency(dependency)
static_dependencies.extend(static)
return static_dependencies
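    # Illustrative sketch (repo URL and namespace are placeholders):
    #   project_config.get_static_dependencies([
    #       {'github': 'https://github.com/SomeOwner/SomeRepo'},
    #       {'namespace': 'somens', 'version': '1.2'},  # already static
    #   ])
    # The github entry is expanded by process_github_dependency() below into
    # zip_url/subfolder dicts and/or {'namespace': ..., 'version': ...} dicts;
    # entries without a 'github' key are passed through unchanged.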
def pretty_dependencies(self, dependencies, indent=None):
if not indent:
indent = 0
pretty = []
for dependency in dependencies:
prefix = '{} - '.format(" " * indent)
for key, value in list(dependency.items()):
extra = []
if value is None or value is False:
continue
if key == 'dependencies':
extra = self.pretty_dependencies(
dependency['dependencies'], indent=indent + 4)
if not extra:
continue
value = '\n{}'.format(" " * (indent + 4))
pretty.append('{}{}: {}'.format(prefix, key, value))
if extra:
pretty.extend(extra)
prefix = '{} '.format(" " * indent)
return pretty
def process_github_dependency(self, dependency, indent=None):
if not indent:
indent = ''
self.logger.info(
'{}Processing dependencies from Github repo {}'.format(
indent,
dependency['github'],
)
)
skip = dependency.get('skip')
if not isinstance(skip, list):
skip = [skip, ]
# Initialize github3.py API against repo
gh = self.get_github_api()
repo_owner, repo_name = dependency['github'].split('/')[3:5]
if repo_name.endswith('.git'):
repo_name = repo_name[:-4]
repo = gh.repository(repo_owner, repo_name)
# Prepare HTTP auth header for requests calls to Github
github = self.keychain.get_service('github')
headers = {'Authorization': 'token {}'.format(github.password)}
# Determine the ref if specified
kwargs = {}
if 'tag' in dependency:
tag = dependency['tag']
kwargs['ref'] = tag
else:
tag = None
# Get the cumulusci.yml file
contents = repo.contents('cumulusci.yml', **kwargs)
cumulusci_yml = hiyapyco.load(contents.decoded, loglevel='INFO')
# Get the namespace from the cumulusci.yml if set
namespace = cumulusci_yml.get('project', {}).get(
'package', {}).get('namespace')
# Check for unmanaged flag on a namespaced package
unmanaged = namespace and dependency.get('unmanaged') is True
# Look for subfolders under unpackaged/pre
unpackaged_pre = []
contents = repo.contents('unpackaged/pre', **kwargs)
if contents:
for dirname in list(contents.keys()):
if 'unpackaged/pre/{}'.format(dirname) in skip:
continue
subfolder = "{}-{}/unpackaged/pre/{}".format(
repo.name, repo.default_branch, dirname)
zip_url = "{}/archive/{}.zip".format(
repo.html_url, repo.default_branch)
unpackaged_pre.append({
'zip_url': zip_url,
'subfolder': subfolder,
'headers': headers,
'unmanaged': dependency.get('unmanaged'),
'namespace_tokenize': dependency.get('namespace_tokenize'),
'namespace_inject': dependency.get('namespace_inject'),
'namespace_strip': dependency.get('namespace_strip'),
})
# Look for metadata under src (deployed if no namespace)
unmanaged_src = None
if unmanaged or not namespace:
contents = repo.contents('src', **kwargs)
if contents:
zip_url = "{}/archive/{}.zip".format(
repo.html_url, repo.default_branch)
subfolder = "{}-{}/src".format(repo.name, repo.default_branch)
unmanaged_src = {
'zip_url': zip_url,
'subfolder': subfolder,
'headers': headers,
'unmanaged': dependency.get('unmanaged'),
'namespace_tokenize': dependency.get('namespace_tokenize'),
'namespace_inject': dependency.get('namespace_inject'),
'namespace_strip': dependency.get('namespace_strip'),
}
# Look for subfolders under unpackaged/post
unpackaged_post = []
contents = repo.contents('unpackaged/post', **kwargs)
if contents:
for dirname in list(contents.keys()):
if 'unpackaged/post/{}'.format(dirname) in skip:
continue
zip_url = "{}/archive/{}.zip".format(
repo.html_url, repo.default_branch)
subfolder = "{}-{}/unpackaged/post/{}".format(
repo.name, repo.default_branch, dirname)
dependency = {
'zip_url': zip_url,
'subfolder': subfolder,
'headers': headers,
'unmanaged': dependency.get('unmanaged'),
'namespace_tokenize': dependency.get('namespace_tokenize'),
'namespace_inject': dependency.get('namespace_inject'),
'namespace_strip': dependency.get('namespace_strip'),
}
# By default, we always inject the project's namespace into
# unpackaged/post metadata
if namespace and not dependency.get('namespace_inject'):
dependency['namespace_inject'] = namespace
                dependency['unmanaged'] = unmanaged
unpackaged_post.append(dependency)
# Parse values from the repo's cumulusci.yml
project = cumulusci_yml.get('project', {})
prefix_beta = project.get('git', {}).get('prefix_beta', 'beta/')
prefix_release = project.get('git', {}).get('prefix_release', 'release/')
dependencies = project.get('dependencies')
if dependencies:
dependencies = self.get_static_dependencies(dependencies)
# Create the final ordered list of all parsed dependencies
repo_dependencies = []
# unpackaged/pre/*
if unpackaged_pre:
repo_dependencies.extend(unpackaged_pre)
if namespace and not unmanaged:
version = None
if tag:
version = self.get_version_for_tag(tag, prefix_beta, prefix_release)
else:
# github3.py doesn't support the latest release API so we hack
# it together here
url = repo._build_url('releases/latest', base_url=repo._api)
try:
version = repo._get(url).json()['name']
except Exception as e:
self.logger.warn('{}{}: {}'.format(
indent, e.__class__.__name__, e.message))
if not version:
raise DependencyResolutionError(
'{}Could not find latest release for {}'.format(indent, namespace)
)
# If a latest prod version was found, make the dependencies a
# child of that install
dependency = {
'namespace': namespace,
'version': version,
}
if dependencies:
dependency['dependencies'] = dependencies
repo_dependencies.append(dependency)
# Unmanaged metadata from src (if referenced repo doesn't have a
# namespace)
else:
if dependencies:
repo_dependencies.extend(dependencies)
if unmanaged_src:
repo_dependencies.append(unmanaged_src)
# unpackaged/post/*
if unpackaged_post:
repo_dependencies.extend(unpackaged_post)
return repo_dependencies
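    # Illustrative shape of the list returned above for a managed-package repo
    # (all values are placeholders):
    #   [
    #       {'zip_url': '.../archive/master.zip',
    #        'subfolder': 'SomeRepo-master/unpackaged/pre/foo', ...},
    #       {'namespace': 'somens', 'version': '1.2', 'dependencies': [...]},
    #       {'zip_url': '.../archive/master.zip',
    #        'subfolder': 'SomeRepo-master/unpackaged/post/bar',
    #        'namespace_inject': 'somens', ...},
    #   ]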
|
|
'''
Created on Sep 20, 2011
@author: jlu
'''
from django.test import TestCase
from django.test.client import Client
from django.db import connection
from datetime import datetime
from django.contrib.auth.models import User
import json
import base64
from api import setup_func
from server.models import UserPoint, UserReward
class ServerTest(TestCase):
fixtures = ['testdata.json',]
def setUp(self):
#User.objects.create_user('testuser', 'my@test.com', 'testpassword')
self.extra = self.getAuthorizationHeader('testuser', 'ttttheman')
def getAuthorizationHeader(self, username, password):
auth = '%s:%s' % (username, password)
auth = 'Basic %s' % base64.encodestring(auth)
auth = auth.strip()
header = {
'HTTP_AUTHORIZATION': auth,
}
#print auth
return header
def test_user(self):
"""
Tests UserHandler
"""
c = Client()
'''
{"id":2}
'''
response = c.get("/api/auth", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(2, r['id'], '')
'''
{"user_count":3}
'''
response = c.get("/api/users", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(3, r['user_count'], '')
'''
{
"pref": {
"nearby_radius": 40.0
},
"userpoint": {
"points": 200
},
"userrewards": [
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"id": 1,
"address": "434 abc ave, san jose, ca",
"longitude": 201.323,
"latitude": 102.454,
"logo": "/path/to/logo.png"
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"expiration": "2012-03-12",
"forsale": false
},
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "StarBucks",
"id": 2,
"address": "101 abc ave, san jose, ca",
"longitude": 22.323,
"latitude": 44.454,
"logo": "/path2/to/logo.png"
},
"equiv_points": 10,
"name": "free starbucks",
"expire_in_days": 0,
"id": 2,
"expire_in_years": 3,
"equiv_dollar": "10",
"expire_in_months": 0,
"description": "free one cup of starbucks coffee"
},
"expiration": "2012-08-20",
"forsale": true
}
],
"user": {
"username": "testuser",
"first_name": "test",
"last_name": "user",
"email": "jun@cardmeleon.me"
},
"userprogresses": [
{
"merchant": {
"name": "Safeway",
"id": 1
},
"cur_times": 2,
"cur_dollar_amt": "50.25"
},
{
"merchant": {
"name": "StarBucks",
"id": 2
},
"cur_times": 200,
"cur_dollar_amt": "206.5"
}
],
"userprofile": {
"referer": {
"id": 3
},
"phone": "4082323232",
"facebook": null,
"deviceid": "abcdefg"
}
}
'''
response = c.get("/api/users/2", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(6, len(r), '')
self.assertEqual('testuser', r['user']['username'], '')
self.assertEqual(40.0, r['pref']['nearby_radius'], '')
self.assertEqual('4082323232', r['userprofile']['phone'], '')
self.assertEqual(2, r['userprogresses'][0]['cur_times'], '')
self.assertEqual('Safeway', r['userprogresses'][0]['merchant']['name'], '')
self.assertEqual('StarBucks', r['userprogresses'][1]['merchant']['name'], '')
self.assertEqual(200, r['userpoint']['points'], '')
self.assertEqual(2, len(r['userrewards']), '')
self.assertEqual('free bread', r['userrewards'][0]['reward']['name'], '')
self.assertEqual('Safeway', r['userrewards'][0]['reward']['merchant']['name'], '')
self.assertEqual('/path/to/logo.png', r['userrewards'][0]['reward']['merchant']['logo'], '')
        self.assertAlmostEqual(201.323, r['userrewards'][0]['reward']['merchant']['longitude'])
self.assertEqual(10, r['userrewards'][1]['reward']['equiv_points'], '')
self.assertEqual(True, r['userrewards'][1]['forsale'], '')
self.assertEqual('StarBucks', r['userrewards'][1]['reward']['merchant']['name'], '')
        self.assertAlmostEqual(44.454, r['userrewards'][1]['reward']['merchant']['latitude'])
self.assertEqual('/path2/to/logo.png', r['userrewards'][1]['reward']['merchant']['logo'], '')
jsonstr = json.dumps({"username":"xin","email":"xin@test.com","phone":"4082538985","referer":{"refer_code":1}})
response = c.post("/api/users", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
attrs = self.getAuthorizationHeader('jlu', 'ttttheman')
response = c.get("/api/users/82", **attrs)
#print response.content
r = json.loads(response.content)
self.assertEqual(6, len(r), '')
self.assertEqual('xin', r['user']['username'], '')
self.assertEqual('4082538985', r['userprofile']['phone'], '')
self.assertEqual('xin@test.com', r['user']['email'], '')
self.assertEqual('', r['user']['first_name'], '')
self.assertIsNone(r['userpoint'], '')
self.assertIsNone(r['pref'], '')
jsonstr = json.dumps({"username":"xin2","email":"xin2@test.com","phone":"4082538985"})
response = c.put("/api/users/82", jsonstr, 'application/json', **attrs)
#print response.content
self.assertEqual('OK', response.content, '')
response = c.get("/api/users/82", **attrs)
#print response.content
r = json.loads(response.content)
self.assertEqual(6, len(r), '')
self.assertEqual('xin', r['user']['username'], '')
self.assertEqual('4082538985', r['userprofile']['phone'], '')
self.assertEqual('xin2@test.com', r['user']['email'], '')
response = c.delete("/api/users/82", **attrs)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/users", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(3, r['user_count'], '')
def test_userpref(self):
"""
Tests UserPrefHandler
"""
c = Client()
'''
{
"nearby_radius": 40.0
}
'''
response = c.get("/api/users/2/pref", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(40.0, r['nearby_radius'], '')
response = c.delete("/api/users/2/pref", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/users/2/pref", **self.extra)
#print response
self.assertContains(response, "DoesNotExist: UserPref matching query does not exist.", status_code=500)
jsonstr = json.dumps({"nearby_radius":25.5})
response = c.post("/api/users/2/pref", jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual("Created", response.content, '')
response = c.get("/api/users/2/pref", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(25.5, r['nearby_radius'], '')
jsonstr = json.dumps({"nearby_radius":45.0})
response = c.put("/api/users/2/pref", jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual('OK', response.content, '')
response = c.get("/api/users/2/pref", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual(45.0, r['nearby_radius'], '')
def test_userreview(self):
"""
Tests UserReviewHandler
"""
c = Client()
"""
[]
"""
response = c.get("/api/users/2/review", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
jsonstr = json.dumps({"merchant":{"id":1}, "review":"this merchant is awesome!", "rating":4.5})
response = c.post("/api/users/2/review", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
jsonstr = json.dumps({"merchant":{"id":2}, "review":"very good! will come back", "rating":2.0})
response = c.post("/api/users/2/review", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(83, r["id"], '')
jsonstr = json.dumps({"merchant":{"id":1}, "review":"nice food", "rating":3.5})
response = c.post("/api/users/2/review", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(84, r["id"], '')
"""
[
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "3.5",
"review": "this merchant is awesome!",
"user": {
"username": "testuser",
"id": 2
},
"time": null
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "4.5",
"review": "this merchant is awesome!",
"user": {
"username": "testuser",
"id": 2
},
"time": "2012-02-07"
},
{
"merchant": {
"name": "StarBucks",
"id": 2
},
"rating": "2.0",
"review": "very good! will come back",
"user": {
"username": "testuser",
"id": 2
},
"time": "2012-02-07"
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "3.5",
"review": "nice food",
"user": {
"username": "testuser",
"id": 2
},
"time": "2012-02-07"
}
]
"""
response = c.get("/api/users/2/review", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(4, len(r), '')
self.assertEqual("this merchant is awesome!", r[0]['review'], '')
self.assertEqual('3.5', r[0]['rating'], '')
self.assertEqual("this merchant is awesome!", r[1]['review'], '')
self.assertEqual('4.5', r[1]['rating'], '')
self.assertEqual("very good! will come back", r[2]['review'], '')
self.assertEqual(2.0, float(r[2]['rating']), '')
self.assertEqual("nice food", r[3]['review'], '')
self.assertEqual('3.5', r[3]['rating'], '')
"""
[
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "3.5",
"review": "this merchant is awesome!",
"user": {
"username": "testuser",
"id": 2
},
"time": null
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "4.0",
"review": "I love it!",
"user": {
"username": "testuser2",
"id": 3
},
"time": null
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "4.5",
"review": "this merchant is awesome!",
"user": {
"username": "testuser",
"id": 2
},
"time": "2012-02-07"
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "3.5",
"review": "nice food",
"user": {
"username": "testuser",
"id": 2
},
"time": "2012-02-07"
}
]
"""
response = c.get("/api/stores/1/review", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(4, len(r), '')
self.assertEqual("this merchant is awesome!", r[2]['review'], '')
self.assertEqual('4.5', r[2]['rating'], '')
self.assertEqual("nice food", r[3]['review'], '')
self.assertEqual('3.5', r[3]['rating'], '')
response = c.get("/api/stores/2/review", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual("very good! will come back", r[0]['review'], '')
self.assertEqual(2.0, float(r[0]['rating']), '')
response = c.delete("/api/users/2/review", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/users/2/review", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
def test_merchant(self):
"""
Tests merchant handler
"""
c = Client()
setup_func(connection)
'''
[
{
"distance": 0.27995036763905656,
"name": "Safeway",
"longitude": 201.323,
"id": 1,
"phone": "6502334332",
"reward_trigger": 200.0,
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"email": "safeway@safeway.com",
"description": ""
}
]
'''
response = c.get("/api/stores/prox/201.32,102.45,1", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), '')
self.assertEqual('Safeway', r[0]['name'], '')
self.assertEqual('6502334332', r[0]['phone'], '')
self.assertEqual('/path/to/logo.png', r[0]['logo'], '')
self.assertEqual(1, r[0]['id'], '')
#self.assertGreater(1.0, r[0]['distance'], '')
#self.assertEqual(200.0, r[0]['reward_trigger'], '')
#self.assertEqual('', r[0]['description'], '')
"""
[
{
"name": "Safeway",
"longitude": 201.323,
"id": 1,
"phone": "6502334332",
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"email": "safeway@safeway.com"
},
{
"name": "StarBucks",
"longitude": 22.323,
"id": 2,
"phone": "4082334332",
"address": "101 abc ave, san jose, ca",
"latitude": 44.454,
"logo": "/path2/to/logo.png",
"email": "support@starbucks.com"
}
]
"""
response = c.get("/api/stores/prox/201.32,102.45,10", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), '')
self.assertEqual('Safeway', r[0]['name'], '')
self.assertEqual('6502334332', r[0]['phone'], '')
self.assertEqual('/path/to/logo.png', r[0]['logo'], '')
self.assertEqual(1, r[0]['id'], '')
#self.assertGreater(1.0, r[0]['distance'], '')
#self.assertEqual(200.0, r[0]['reward_trigger'], '')
#self.assertEqual('', r[0]['description'], '')
self.assertEqual('StarBucks', r[1]['name'], '')
self.assertEqual('4082334332', r[1]['phone'], '')
self.assertEqual('/path2/to/logo.png', r[1]['logo'], '')
self.assertEqual(2, r[1]['id'], '')
#self.assertGreater(1.0, r[1]['distance'], '')
#self.assertEqual(200.0, r[1]['reward_trigger'], '')
#self.assertEqual('', r[1]['description'], '')
'''
{
"name": "Safeway",
"rewardprogram_set": [
{
"status": 1,
"merchant": {
"name": "Safeway"
},
"name": "safeway loyalty program",
"prog_type": 1,
"reward_trigger": 200.0,
"end_time": null,
"reward": {
"equiv_points": 20,
"name": "free bread"
},
"start_time": null
},
{
"status": 1,
"merchant": {
"name": "Safeway"
},
"name": "safeway loyalty program 2",
"prog_type": 0,
"reward_trigger": 400.0,
"end_time": null,
"reward": {
"equiv_points": 10,
"name": "free starbucks"
},
"start_time": null
}
],
"longitude": 201.323,
"phone": "6502334332",
"userreview_set": [
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "3.5",
"review": "this merchant is awesome!",
"user": {
"username": "testuser",
"id": 2
},
"time": null
},
{
"merchant": {
"name": "Safeway",
"id": 1
},
"rating": "4.0",
"review": "I love it!",
"user": {
"username": "testuser2",
"id": 3
},
"time": null
}
],
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"email": "safeway@safeway.com"
}
'''
response = c.get("/api/stores/1", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(9, len(r), '')
self.assertEqual('Safeway', r['name'], '')
self.assertEqual('6502334332', r['phone'], '')
self.assertEqual('/path/to/logo.png', r['logo'], '')
self.assertEqual('safeway loyalty program', r['rewardprogram_set'][0]['name'], '')
self.assertEqual(200.0, r['rewardprogram_set'][0]['reward_trigger'], '')
self.assertEqual('free bread', r['rewardprogram_set'][0]['reward']['name'], '')
self.assertEqual('safeway loyalty program 2', r['rewardprogram_set'][1]['name'], '')
self.assertEqual(0, r['rewardprogram_set'][1]['prog_type'], '')
self.assertEqual(10, r['rewardprogram_set'][1]['reward']['equiv_points'], '')
self.assertEqual(2, len(r['userreview_set']), '')
jsonstr = json.dumps({"name":"BostonMarket","email":"xin@test.com","phone":"4082538985","address":"973 1st st, san jose, ca","logo":"/logo/bm.png","longitude":"150.20","latitude":"90.09"})
response = c.post("/api/stores", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
response = c.get("/api/stores/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(9, len(r), '')
self.assertEqual('BostonMarket', r['name'], '')
self.assertEqual('4082538985', r['phone'], '')
self.assertEqual('/logo/bm.png', r['logo'], '')
self.assertEqual('973 1st st, san jose, ca', r['address'], '')
jsonstr = json.dumps({"email":"bm@test.com","phone":"6509234325"})
response = c.put("/api/stores/82", jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual('OK', response.content, '')
response = c.get("/api/stores/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(9, len(r), '')
self.assertEqual('BostonMarket', r['name'], '')
self.assertEqual('6509234325', r['phone'], '')
self.assertEqual('bm@test.com', r['email'], '')
response = c.delete("/api/stores/82", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/stores/82", **self.extra)
#print response.content
self.assertContains(response, "DoesNotExist: Merchant matching query does not exist.", status_code=500)
def test_purchase(self):
"""
Tests purchase handler
"""
c = Client()
time = str(datetime.now())
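# Record a purchase for user 2 at merchant 1, then list and clear the user's purchase history.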
jsonstr = json.dumps({"time":time, "merchant":{"id":1}, "dollar_amount":20.50, "description":"test purchase"})
response = c.post('/api/users/2/purchase', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
'''
[
{
"dollar_amount": "20.5",
"merchant": {
"name": "Safeway"
},
"description": "test purchase",
"time": "2011-09-30 23:49:03"
}
]
'''
response = c.get('/api/users/2/purchase', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual('test purchase', r[0]['description'], '')
self.assertEqual('Safeway', r[0]['merchant']['name'], '')
response = c.delete('/api/users/2/purchase', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
def test_rewardprogram(self):
"""
Tests rewardprogram handler
"""
c = Client()
setup_func(connection)
'''
{
"status": 1,
"merchant": {
"name": "Safeway"
},
"name": "safeway loyalty program",
"prog_type": 1,
"reward_trigger": 200.0,
"end_time": null,
"reward": {
"equiv_points": 20,
"name": "free bread"
},
"start_time": null
}
'''
response = c.get("/api/stores/1/program/1", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(8, len(r), '')
self.assertEqual('safeway loyalty program', r['name'], '')
self.assertEqual(1, r['prog_type'], '')
self.assertEqual(None, r['end_time'], '')
self.assertEqual(200.0, r['reward_trigger'], '')
'''
[
{
"status": 1,
"merchant": {
"name": "Safeway"
},
"name": "safeway loyalty program",
"prog_type": 1,
"reward_trigger": 200.0,
"end_time": null,
"reward": {
"equiv_points": 20,
"name": "free bread"
},
"start_time": null
},
{
"status": 1,
"merchant": {
"name": "Safeway"
},
"name": "safeway loyalty program 2",
"prog_type": 1,
"reward_trigger": 400.0,
"end_time": null,
"reward": {
"equiv_points": 10,
"name": "free starbucks"
},
"start_time": null
}
]
'''
response = c.get("/api/stores/1/program", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), 'number of merchant reward programs is not 2')
self.assertEqual('safeway loyalty program', r[0]['name'], '')
self.assertEqual(1, r[0]['prog_type'], '')
self.assertEqual(None, r[0]['end_time'], '')
self.assertEqual(200.0, r[0]['reward_trigger'], '')
self.assertEqual('safeway loyalty program 2', r[1]['name'], '')
self.assertEqual(0, r[1]['prog_type'], '')
self.assertEqual(None, r[1]['end_time'], '')
self.assertEqual(400.0, r[1]['reward_trigger'], '')
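# Full CRUD pass over merchant 1's reward programs: create one, read it back,
# update it, delete it, then delete the whole collection.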
jsonstr = json.dumps({"name":"BostonMarket loyalty program","status":1,"prog_type":1,"reward_trigger":150.0,"end_time":"2012-05-26","reward":{"id":1}})
response = c.post("/api/stores/1/program", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
response = c.get("/api/stores/1/program/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(8, len(r), '')
self.assertEqual('BostonMarket loyalty program', r['name'], '')
self.assertEqual(1, r['prog_type'], '')
self.assertEqual("2012-05-26", r['end_time'], '')
self.assertEqual(150.0, r['reward_trigger'], '')
self.assertEqual("free bread", r['reward']['name'], '')
jsonstr = json.dumps({"prog_type":2,"reward_trigger":10,"reward":{"id":2}})
response = c.put("/api/stores/1/program/82", jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual('OK', response.content, '')
response = c.get("/api/stores/1/program/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(8, len(r), '')
self.assertEqual('BostonMarket loyalty program', r['name'], '')
self.assertEqual(2, r['prog_type'], '')
self.assertEqual("2012-05-26", r['end_time'], '')
self.assertEqual(10, r['reward_trigger'], '')
self.assertEqual("free starbucks", r['reward']['name'], '')
response = c.delete("/api/stores/1/program/82", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/stores/1/program", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), 'number of merchant reward programs is not 2')
response = c.delete("/api/stores/1/program", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/stores/1/program", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), 'number of merchant reward programs is not 0')
def test_reward(self):
'''
Test RewardHandler
'''
c = Client()
setup_func(connection)
'''
{
"status": 1,
"merchant": {
"name": "Safeway",
"id": 1,
"address": "434 abc ave, san jose, ca",
"longitude": 201.323,
"latitude": 102.454
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
}
'''
response = c.get("/api/stores/1/reward/1", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(10, len(r), '')
self.assertEqual('free bread', r['name'], '')
self.assertEqual(20, r['equiv_points'], '')
self.assertEqual(3, r['expire_in_years'], '')
self.assertEqual('Safeway', r['merchant']['name'], '')
self.assertEqual('434 abc ave, san jose, ca', r['merchant']['address'], '')
self.assertAlmostEqual(201.323, r['merchant']['longitude'], msg='')
'''
[
{
"status": 1,
"merchant": {
"name": "Safeway",
"id": 1,
"address": "434 abc ave, san jose, ca",
"longitude": 201.323,
"latitude": 102.454
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
}
]
'''
response = c.get("/api/stores/1/reward", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), 'number of merchant rewards is not 1')
self.assertEqual('free bread', r[0]['name'], '')
self.assertEqual(20, r[0]['equiv_points'], '')
self.assertEqual(3, r[0]['expire_in_years'], '')
self.assertEqual('Safeway', r[0]['merchant']['name'], '')
self.assertEqual('434 abc ave, san jose, ca', r[0]['merchant']['address'], '')
self.assertAlmostEqual(201.323, r[0]['merchant']['longitude'], msg='')
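# Full CRUD pass over merchant 1's rewards: create one, read it back,
# update it, delete it, then delete the whole collection.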
jsonstr = json.dumps({"name":"free meal","status":1,"equiv_dollar":30,"equiv_points":30,"expire_in_days":"100","expire_in_years":"1","expire_in_months":"0","description":"free meal only"})
response = c.post("/api/stores/1/reward", jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
response = c.get("/api/stores/1/reward/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(10, len(r), '')
self.assertEqual('free meal', r['name'], '')
self.assertEqual(30, r['equiv_points'], '')
self.assertEqual(1, r['expire_in_years'], '')
self.assertEqual('Safeway', r['merchant']['name'], '')
jsonstr = json.dumps({"equiv_points":50,"expire_in_months":5})
response = c.put("/api/stores/1/reward/82", jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual('OK', response.content, '')
response = c.get("/api/stores/1/reward/82", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(10, len(r), '')
self.assertEqual('free meal', r['name'], '')
self.assertEqual(50, r['equiv_points'], '')
self.assertEqual(5, r['expire_in_months'], '')
self.assertEqual('Safeway', r['merchant']['name'], '')
response = c.delete("/api/stores/1/reward/82", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/stores/1/reward", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), 'number of merchant rewards is not 1')
response = c.delete("/api/stores/1/reward", **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get("/api/stores/1/reward", **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), 'number of merchant rewards is not 0')
def test_userreward(self):
'''
Test UserRewardHandler
'''
c = Client()
response = c.get('/api/users/reward', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(4, len(r), '')
'''
[
{
"id": 1,
"user": {
"username": "testuser2",
"id": 3
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"longitude": 201.323,
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20.00",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"expiration": "2012-08-15",
"forsale": true
},
{
"id": 2,
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "StarBucks",
"longitude": 22.323,
"address": "101 abc ave, san jose, ca",
"latitude": 44.454,
"logo": "/path2/to/logo.png",
"id": 2
},
"equiv_points": 10,
"name": "free starbucks",
"expire_in_days": 0,
"id": 2,
"expire_in_years": 3,
"equiv_dollar": "10.00",
"expire_in_months": 0,
"description": "free one cup of starbucks coffee"
},
"expiration": "2012-08-20",
"forsale": true
}
]
'''
response = c.get('/api/users/reward/forsell', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), '')
self.assertEqual('free one cup of starbucks coffee', r[1]['reward']['description'], '')
self.assertEqual('testuser', r[1]['user']['username'], '')
self.assertEqual(10, r[1]['reward']['equiv_points'], '')
self.assertEqual(True, r[1]['forsale'], '')
self.assertEqual(2, r[1]['id'], '')
self.assertEqual('2012-08-20', r[1]['expiration'], '')
self.assertEqual('StarBucks', r[1]['reward']['merchant']['name'], '')
self.assertEqual('101 abc ave, san jose, ca', r[1]['reward']['merchant']['address'], '')
self.assertEqual('free whole-wheet bread', r[0]['reward']['description'], '')
self.assertEqual('testuser2', r[0]['user']['username'], '')
self.assertEqual(20, r[0]['reward']['equiv_points'], '')
self.assertEqual(True, r[0]['forsale'], '')
self.assertEqual(3, r[0]['id'], '')
self.assertEqual('2012-08-15', r[0]['expiration'], '')
self.assertEqual('Safeway', r[0]['reward']['merchant']['name'], '')
self.assertEqual('434 abc ave, san jose, ca', r[0]['reward']['merchant']['address'], '')
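# Claim a reward: without a free code the user is rejected for insufficient purchases,
# a wrong free code (1234) is rejected, and a matching free code (2011 in this fixture)
# creates the user reward.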
jsonstr = json.dumps({"merchant_id":1, "rewardprogram_id":1})
response = c.post('/api/users/2/reward', jsonstr, 'application/json', **self.extra)
#print response.content
self.assertContains(response, "user hasn't made enough purchases to be eligible for a reward")
jsonstr = json.dumps({"merchant_id":1, "rewardprogram_id":1})
response = c.post('/api/users/2/reward/1234', jsonstr, 'application/json', **self.extra)
#print response.content
self.assertContains(response, "Wrong free_code! user is not eligible for reward")
jsonstr = json.dumps({"merchant_id":1, "rewardprogram_id":1})
response = c.post('/api/users/2/reward/2011', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
jsonstr = json.dumps({"merchant_id":2, "rewardprogram_id":2})
response = c.post('/api/users/2/reward', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(83, r["id"], '')
jsonstr = json.dumps({"forsale":True, "userreward_id":83})
response = c.put('/api/users/2/reward', jsonstr, 'application/json', **self.extra)
#print response.content
self.assertEqual('OK', response.content, '')
'''
[
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"longitude": 201.323,
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20.00",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"expiration": "2012-03-12",
"forsale": false
},
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "StarBucks",
"longitude": 22.323,
"address": "101 abc ave, san jose, ca",
"latitude": 44.454,
"logo": "/path2/to/logo.png",
"id": 2
},
"equiv_points": 10,
"name": "free starbucks",
"expire_in_days": 0,
"id": 2,
"expire_in_years": 3,
"equiv_dollar": "10.00",
"expire_in_months": 0,
"description": "free one cup of starbucks coffee"
},
"expiration": "2012-08-20",
"forsale": true
},
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"longitude": 201.323,
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20.00",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"expiration": "2015-02-06",
"forsale": false
},
{
"user": {
"username": "testuser",
"id": 2
},
"reward": {
"status": 1,
"merchant": {
"name": "StarBucks",
"longitude": 22.323,
"address": "101 abc ave, san jose, ca",
"latitude": 44.454,
"logo": "/path2/to/logo.png",
"id": 2
},
"equiv_points": 10,
"name": "free starbucks",
"expire_in_days": 0,
"id": 2,
"expire_in_years": 3,
"equiv_dollar": "10.00",
"expire_in_months": 0,
"description": "free one cup of starbucks coffee"
},
"expiration": "2015-02-06",
"forsale": true
}
]
'''
response = c.get('/api/users/2/reward', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(4, len(r), '')
self.assertEqual('free one cup of starbucks coffee', r[1]['reward']['description'], '')
self.assertEqual('testuser', r[1]['user']['username'], '')
self.assertEqual(10, r[1]['reward']['equiv_points'], '')
self.assertEqual(True, r[1]['forsale'], '')
self.assertEqual('2012-08-20', r[1]['expiration'], '')
self.assertEqual('free whole-wheet bread', r[0]['reward']['description'], '')
self.assertEqual('testuser', r[0]['user']['username'], '')
self.assertEqual(20, r[0]['reward']['equiv_points'], '')
self.assertEqual(False, r[0]['forsale'], '')
self.assertEqual('2012-03-12', r[0]['expiration'], '')
self.assertEqual('free whole-wheet bread', r[2]['reward']['description'], '')
self.assertEqual('testuser', r[2]['user']['username'], '')
self.assertEqual(20, r[2]['reward']['equiv_points'], '')
self.assertEqual(False, r[2]['forsale'], '')
#self.assertEqual('2015-02-06', r[2]['expiration'], '')
self.assertEqual('free one cup of starbucks coffee', r[3]['reward']['description'], '')
self.assertEqual('testuser', r[3]['user']['username'], '')
self.assertEqual(10, r[3]['reward']['equiv_points'], '')
self.assertEqual(True, r[3]['forsale'], '')
#self.assertEqual('2015-02-06', r[2]['expiration'], '')
response = c.delete('/api/users/2/reward', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get('/api/users/2/reward', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
def test_trade(self):
"""
Tests trade activity handler
"""
c = Client()
jsonstr = json.dumps({"userreward_id":3, "description":"test buy"})
response = c.post('/api/users/2/buy', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
'''
[
{
"description": "test buy",
"points_value": 20,
"time": "2011-10-10 01:25:10",
"to_user": {
"username": "testuser",
"first_name": "test",
"last_name": "user",
"email": "jun@cardmeleon.me"
},
"from_user": {
"username": "testuser2",
"first_name": "test2",
"last_name": "user2",
"email": "jun@cardmeleon.me"
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"activity_type": 2
}
]
'''
response = c.get('/api/users/2/buy', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual('test buy', r[0]['description'], '')
self.assertEqual(20, r[0]['points_value'], '')
self.assertEqual('testuser2', r[0]['from_user']['username'], '')
self.assertEqual('testuser', r[0]['to_user']['username'], '')
self.assertEqual(20, r[0]['reward']['equiv_points'], '')
self.assertEqual('free bread', r[0]['reward']['name'], '')
self.assertEqual(2, r[0]['activity_type'], '')
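# Verify the trade's side effects directly in the database: points moved from buyer
# to seller and the traded reward is no longer marked for sale.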
buyerPoint = UserPoint.objects.get(user__id=2)
sellerPoint = UserPoint.objects.get(user__id=3)
userrewards = UserReward.objects.filter(user__id=2, reward__id=1)
self.assertEqual(180, buyerPoint.points, '')
self.assertEqual(170, sellerPoint.points, '')
self.assertEqual(2, len(userrewards), '')
self.assertEqual(False, userrewards[0].forsale, '')
self.assertEqual(False, userrewards[1].forsale, '')
response = c.delete('/api/users/2/buy', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get('/api/users/2/buy', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
def test_gift(self):
"""
Tests gift activity handler
"""
c = Client()
jsonstr = json.dumps({"userreward_id":1, "to_user":{'id':3}, "description":"test gifting"})
response = c.post('/api/users/2/gift', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["gift_code"], '')
jsonstr = json.dumps({"userreward_id":2, "description":"test gifting for non-member"})
response = c.put('/api/users/2/gift', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(83, r['gift_code'], '')
'''
[
{
"description": "test gifting for non-member",
"points_value": 10,
"time": "2012-01-27 04:04:06",
"to_user": null,
"from_user": {
"username": "testuser",
"first_name": "test",
"last_name": "user",
"email": "jun@cardmeleon.me"
},
"reward": {
"status": 1,
"merchant": {
"name": "StarBucks",
"longitude": 22.323,
"address": "101 abc ave, san jose, ca",
"latitude": 44.454,
"logo": "/path2/to/logo.png",
"id": 2
},
"equiv_points": 10,
"name": "free starbucks",
"expire_in_days": 0,
"id": 2,
"expire_in_years": 3,
"equiv_dollar": "10",
"expire_in_months": 0,
"description": "free one cup of starbucks coffee"
},
"activity_type": 3
},
{
"description": "test gifting",
"points_value": 20,
"time": "2012-01-27 04:04:06",
"to_user": {
"username": "testuser2",
"first_name": "test2",
"last_name": "user2",
"email": "jun@cardmeleon.me"
},
"from_user": {
"username": "testuser",
"first_name": "test",
"last_name": "user",
"email": "jun@cardmeleon.me"
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"longitude": 201.323,
"address": "434 abc ave, san jose, ca",
"latitude": 102.454,
"logo": "/path/to/logo.png",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"activity_type": 3
}
]
'''
response = c.get('/api/users/2/gift', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(2, len(r), '')
self.assertEqual('test gifting for non-member', r[0]['description'], '')
self.assertEqual(10, r[0]['points_value'], '')
self.assertEqual('testuser', r[0]['from_user']['username'], '')
self.assertIsNone(r[0]['to_user'], '')
self.assertEqual(10, r[0]['reward']['equiv_points'], '')
self.assertEqual('free starbucks', r[0]['reward']['name'], '')
self.assertEqual(3, r[0]['activity_type'], '')
self.assertEqual('test gifting', r[1]['description'], '')
self.assertEqual(20, r[1]['points_value'], '')
self.assertEqual('testuser', r[1]['from_user']['username'], '')
self.assertEqual('testuser2', r[1]['to_user']['username'], '')
self.assertEqual(20, r[1]['reward']['equiv_points'], '')
self.assertEqual('free bread', r[1]['reward']['name'], '')
self.assertEqual(3, r[1]['activity_type'], '')
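# Verify the gift's side effects in the database: the gifter no longer owns reward 1,
# the giftee now holds it, and both users' point balances are checked.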
gifterPoint = UserPoint.objects.get(user__id=2)
gifteePoint = UserPoint.objects.get(user__id=3)
gifterrewards = UserReward.objects.filter(user__id=2, reward__id=1)
gifteerewards = UserReward.objects.filter(user__id=3, reward__id=1)
self.assertEqual(200, gifterPoint.points, '')
self.assertEqual(150, gifteePoint.points, '')
self.assertEqual(0, len(gifterrewards), '')
self.assertEqual(2, len(gifteerewards), '')
self.assertEqual(False, gifteerewards[0].forsale, '')
self.assertEqual(True, gifteerewards[1].forsale, '')
response = c.delete('/api/users/2/gift', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get('/api/users/2/gift', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
def test_redeem(self):
"""
Tests redeem activity handler
"""
c = Client()
jsonstr = json.dumps({"userreward_id":1, "description":"test redeem"})
response = c.post('/api/users/2/redeem', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r["id"], '')
'''
[
{
"description": "test redeem",
"points_value": 20,
"time": "2011-10-02 02:08:27",
"to_user": null,
"from_user": {
"username": "ttttheman",
"phone": "4082323232",
"facebook": null,
"email": "ttttheman@test.com",
"referer": {
"id": 2
}
},
"reward": {
"status": 1,
"merchant": {
"name": "Safeway",
"id": 1
},
"equiv_points": 20,
"name": "free bread",
"expire_in_days": 0,
"id": 1,
"expire_in_years": 3,
"equiv_dollar": "20",
"expire_in_months": 0,
"description": "free whole-wheet bread"
},
"activity_type": 1
}
]
'''
response = c.get('/api/users/2/redeem', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(1, len(r), '')
self.assertEqual('test redeem', r[0]['description'], '')
self.assertEqual(20, r[0]['points_value'], '')
self.assertEqual('testuser', r[0]['from_user']['username'], '')
self.assertEqual(None, r[0]['to_user'], '')
self.assertEqual(20, r[0]['reward']['equiv_points'], '')
self.assertEqual('free bread', r[0]['reward']['name'], '')
self.assertEqual(1, r[0]['activity_type'], '')
userrewards = UserReward.objects.filter(user__id=2, reward__id=1)
self.assertEqual(0, len(userrewards), '')
response = c.delete('/api/users/2/redeem', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get('/api/users/2/redeem', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
def test_refer(self):
"""
Tests referral activity handler
"""
c = Client()
"""
[
{"referee_name":"Jun Lu", "refer_code":1},
{"referee_name":"Yi Li", "refer_code":2},
{"referee_name":"Xin Han", "refer_code":3}
]
"""
jsonstr = json.dumps([
{"referee_name":"Jun Lu", "refer_method":1},
{"referee_name":"Yi Li", "refer_method":1},
{"referee_name":"Xin Han", "refer_method":1}
])
response = c.post('/api/users/2/refer', jsonstr, 'application/json', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(82, r[0]['refer_code'], '')
self.assertEqual(83, r[1]['refer_code'], '')
self.assertEqual(84, r[2]['refer_code'], '')
'''
[
{
"referee_name": "Xin Han",
"referee_join_time": null,
"refer_method": 1,
"time": "2012-01-15 03:35:29"
},
{
"referee_name": "Yi Li",
"referee_join_time": null,
"refer_method": 1,
"time": "2012-01-15 03:35:29"
},
{
"referee_name": "Jun Lu",
"referee_join_time": null,
"refer_method": 1,
"time": "2012-01-15 03:35:29"
}
]
'''
response = c.get('/api/users/2/refer', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(3, len(r), '')
self.assertEqual('Xin Han', r[0]['referee_name'], '')
self.assertEqual(None, r[0]['referee_join_time'], '')
self.assertEqual(1, r[0]['refer_method'], '')
self.assertEqual('Yi Li', r[1]['referee_name'], '')
self.assertEqual(None, r[1]['referee_join_time'], '')
self.assertEqual(1, r[1]['refer_method'], '')
self.assertEqual('Jun Lu', r[2]['referee_name'], '')
self.assertEqual(None, r[2]['referee_join_time'], '')
self.assertEqual(1, r[2]['refer_method'], '')
response = c.delete('/api/users/2/refer', **self.extra)
#print response.content
self.assertEqual(0, len(response.content), '')
response = c.get('/api/users/2/refer', **self.extra)
#print response.content
r = json.loads(response.content)
self.assertEqual(0, len(r), '')
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the loss scaling optimizer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.keras import backend
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.mixed_precision import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.experimental import mixed_precision
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer(object):
"""Wrapper that DistributionStrategy will not unwrap.
Typically, DistributionStrategy will unwrap values when going from a cross-
replica context to a replica context via `call_for_each_replica`. This class
is a wrapper that DistributionStrategy will not unwrap, so it can be used to
prevent it from unwrapping a value.
TODO(reedwm): Find/implement a better way of preventing values from being
unwrapped by DistributionStrategy
"""
__slots__ = ['value']
def __init__(self, value):
self.value = value
class _DelegatingTrackableMixin(object):
"""A mixin that delegates all Trackable methods to another trackable object.
This class must be used with multiple inheritance. A class that subclasses
Trackable can also subclass this class, which causes all Trackable methods to
be delegated to the trackable object passed in the constructor.
A subclass can use this mixin to appear as if it were the trackable passed to
the constructor, from a Checkpoint's perspective. LossScaleOptimizer uses this
mixin, so that the checkpoint format for a LossScaleOptimizer is identical to
the checkpoint format for a normal optimizer. This allows a model to be saved
with a normal Optimizer and restored with a LossScaleOptimizer, or vice versa.
The only difference in checkpoint format is that the loss scale is also saved
with a LossScaleOptimizer.
"""
def __init__(self, trackable_obj):
self._trackable = trackable_obj
# pylint: disable=protected-access
@property
def _setattr_tracking(self):
return self._trackable._setattr_tracking
@_setattr_tracking.setter
def _setattr_tracking(self, value):
self._trackable._setattr_tracking = value
@property
def _update_uid(self):
return self._trackable._update_uid
@_update_uid.setter
def _update_uid(self, value):
self._trackable._update_uid = value
@property
def _unconditional_checkpoint_dependencies(self):
return self._trackable._unconditional_checkpoint_dependencies
@property
def _unconditional_dependency_names(self):
return self._trackable._unconditional_dependency_names
@property
def _name_based_restores(self):
return self._trackable._name_based_restores
def _maybe_initialize_trackable(self):
return self._trackable._maybe_initialize_trackable()
@property
def _object_identifier(self):
return self._trackable._object_identifier
@property
def _tracking_metadata(self):
return self._trackable._tracking_metadata
def _no_dependency(self, value):
return self._trackable._no_dependency(value)
def _name_based_attribute_restore(self, checkpoint):
return self._trackable._name_based_attribute_restore(checkpoint)
@property
def _checkpoint_dependencies(self):
return self._trackable._checkpoint_dependencies
@property
def _deferred_dependencies(self):
return self._trackable._deferred_dependencies
def _lookup_dependency(self, name):
return self._trackable._lookup_dependency(name)
def _add_variable_with_custom_getter(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
getter=None,
overwrite=False,
**kwargs_for_getter):
return self._trackable._add_variable_with_custom_getter(
name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)
def _preload_simple_restoration(self, name):
return self._trackable._preload_simple_restoration(name)
def _track_trackable(self, trackable, name, overwrite=False): # pylint: disable=redefined-outer-name
return self._trackable._track_trackable(trackable, name, overwrite)
def _handle_deferred_dependencies(self, name, trackable): # pylint: disable=redefined-outer-name
return self._trackable._handle_deferred_dependencies(name, trackable)
def _restore_from_checkpoint_position(self, checkpoint_position):
return self._trackable._restore_from_checkpoint_position(
checkpoint_position)
def _single_restoration_from_checkpoint_position(self, checkpoint_position,
visit_queue):
return self._trackable._single_restoration_from_checkpoint_position(
checkpoint_position, visit_queue)
def _gather_saveables_for_checkpoint(self):
return self._trackable._gather_saveables_for_checkpoint()
def _list_extra_dependencies_for_serialization(self, serialization_cache):
return self._trackable._list_extra_dependencies_for_serialization(
serialization_cache)
def _list_functions_for_serialization(self, serialization_cache):
return self._trackable._list_functions_for_serialization(
serialization_cache)
# pylint: enable=protected-access
def _is_all_finite(grads):
"""Returns a scalar boolean tensor indicating if all gradients are finite."""
is_finite_per_grad = [
math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
]
return math_ops.reduce_all(is_finite_per_grad)
def _op_in_graph_mode(tensor):
"""Returns the tensor's op in graph mode, or the tensor in eager mode.
This is useful because sometimes an op is needed in graph mode instead of a
tensor. In eager mode, there are no ops.
Args:
tensor: A tensor.
Returns:
The tensor's op in graph mode. The tensor in eager mode.
"""
if context.executing_eagerly():
return tensor
return tensor.op
def _assign_if_finite(var, value):
"""Assigns a value to a variable if the value is finite."""
return control_flow_ops.cond(
math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),
control_flow_ops.no_op)
class _DynamicLossScaleState(trackable.Trackable):
"""The state of a dynamic loss scale."""
def __init__(self,
initial_loss_scale,
growth_steps,
multiplier):
"""Creates the dynamic loss scale."""
super(_DynamicLossScaleState, self).__init__()
self._initial_loss_scale = float(initial_loss_scale)
self._growth_steps = int(growth_steps)
self._multiplier = float(multiplier)
self._weights = {}
self._current_loss_scale = self._add_weight(
name='current_loss_scale',
dtype=dtypes.float32,
initial_value=self._initial_loss_scale)
# The number of consecutive steps with finite gradients since the last
# nonfinite gradient or change in loss scale. The name is 'good_steps' for
# backwards compatibility with older checkpoints.
self._counter = self._add_weight(
name='good_steps', dtype=dtypes.int64, initial_value=0)
def _add_weight(self, name, initial_value, dtype=None):
"""Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
"""
variable = variable_scope.variable(
initial_value=initial_value,
name=name,
dtype=dtype,
trainable=False,
use_resource=True,
synchronization=variables.VariableSynchronization.AUTO,
# Set aggregation to NONE, as loss scaling variables should never be
# aggregated.
aggregation=variables.VariableAggregation.NONE)
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
key = (name, graph_key)
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
backend.track_variable(variable)
return variable
@property
def _checkpoint_dependencies(self):
"""From Trackable. Gather graph-specific weights to save."""
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
weights = []
for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):
if g == graph_key:
weights.append(trackable.TrackableReference(name=name, ref=v))
return (super(_DynamicLossScaleState, self)._checkpoint_dependencies +
weights)
def _lookup_dependency(self, name):
"""From Trackable. Find a weight in the current graph."""
unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
return self._weights.get((name, graph_key), None)
@property
def initial_loss_scale(self):
return self._initial_loss_scale
@property
def growth_steps(self):
return self._growth_steps
@property
def multiplier(self):
return self._multiplier
@property
def current_loss_scale(self):
"""Returns the current loss scale as a float32 `tf.Variable`."""
return self._current_loss_scale
@property
def counter(self):
"""Returns the counter as a float32 `tf.Variable`."""
return self._counter
def __call__(self):
"""Returns the current loss scale as a scalar `float32` tensor."""
return ops.convert_to_tensor_v2_with_dispatch(self._current_loss_scale)
def update(self, grads):
"""Updates the value of the loss scale.
Args:
grads: A nested structure of unscaled gradients, each which is the
gradient of the loss with respect to a weight.
Returns:
update_op: In eager mode, None. In graph mode, an op to update the loss
scale.
should_apply_gradients: Either a bool or a scalar boolean tensor. If
False, the caller should skip applying `grads` to the variables this
step.
"""
grads = nest.flatten(grads)
if distribution_strategy_context.has_strategy():
distribution = distribution_strategy_context.get_strategy()
def get_is_finite(grads):
is_finite = _is_all_finite(grads)
# We cast to float, because we cannot reduce booleans with
# DistributionStrategy.
return math_ops.cast(is_finite, dtypes.float32)
is_finite_float = distribution.extended.call_for_each_replica(
get_is_finite, args=(grads,))
reduced_is_finite_float = distribution.reduce(reduce_util.ReduceOp.SUM,
is_finite_float, axis=None)
is_finite = math_ops.equal(reduced_is_finite_float,
distribution.num_replicas_in_sync)
else:
is_finite = _is_all_finite(grads)
def update_if_finite_grads():
"""Update assuming the gradients are finite."""
def incr_loss_scale():
new_loss_scale = self.current_loss_scale * self.multiplier
return control_flow_ops.group(
_assign_if_finite(self.current_loss_scale, new_loss_scale),
self.counter.assign(0))
return control_flow_ops.cond(
self.counter + 1 >= self.growth_steps,
incr_loss_scale,
lambda: _op_in_graph_mode(self.counter.assign_add(1)))
def update_if_not_finite_grads():
"""Update assuming the gradients are nonfinite."""
new_loss_scale = math_ops.maximum(
self.current_loss_scale / self.multiplier, 1)
return control_flow_ops.group(
self.counter.assign(0),
self.current_loss_scale.assign(new_loss_scale))
update_op = control_flow_ops.cond(is_finite, update_if_finite_grads,
update_if_not_finite_grads)
should_apply_gradients = is_finite
return update_op, should_apply_gradients
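# Illustrative sketch (not part of the upstream module): a plain-Python summary
# of the dynamic loss-scale policy implemented by _DynamicLossScaleState.update()
# above. The function name and signature below are hypothetical and are not used
# anywhere in this file; they exist only to make the doubling/halving rule easy
# to read.
def _sketch_dynamic_loss_scale_step(loss_scale, counter, grads_are_finite,
                                    growth_steps=2000, multiplier=2.0):
  """Returns the next (loss_scale, counter) pair under the dynamic policy."""
  if grads_are_finite:
    if counter + 1 >= growth_steps:
      # Enough consecutive finite steps: double the scale and reset the counter.
      # (The real implementation also skips the doubling if the doubled scale
      # would overflow to infinity.)
      return loss_scale * multiplier, 0
    return loss_scale, counter + 1
  # Nonfinite gradients this step: halve the scale (never below 1) and reset.
  return max(loss_scale / multiplier, 1.0), 0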
# See LossScaleOptimizer docstring for why this is so big
_DEFAULT_INITIAL_SCALE = 2 ** 15
_DEFAULT_GROWTH_STEPS = 2000
# pylint: disable=g-classes-have-attributes
@keras_export('keras.mixed_precision.LossScaleOptimizer')
class LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling to prevent numeric underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
intermediate gradients to be scaled by the loss scale as well. The final
gradients are divided (or "unscaled") by the loss scale to bring them back to
their original value.
`LossScaleOptimizer` wraps another optimizer and applies loss scaling to it.
By default, the loss scale is dynamically updated over time so you do not have
to choose the loss scale. The `minimize` method automatically scales the loss,
unscales the gradients, and updates the loss scale so all you have to do is
wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For
example:
>>> opt = tf.keras.optimizers.SGD(0.25)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> var = tf.Variable(1.)
>>> loss_fn = lambda: var ** 2
>>> # 'minimize' applies loss scaling and updates the loss scale.
>>> opt.minimize(loss_fn, var_list=var)
>>> var.numpy()
0.5
If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you
must scale the loss and gradients manually. This can be done with the
`LossScaleOptimizer.get_scaled_loss` and
`LossScaleOptimizer.get_unscaled_gradients` methods. For example:
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
... scaled_loss = opt.get_scaled_loss(loss)
>>> scaled_grad = tape.gradient(scaled_loss, var)
>>> (grad,) = opt.get_unscaled_gradients([scaled_grad])
>>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here
>>> var.numpy()
0.25
Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients`
(or both) when using a `tf.GradientTape`, the model will likely converge to
worse quality. Please make sure you call each function exactly once.
When mixed precision with float16 is used, there is typically no risk of
underflow affecting model quality if loss scaling is properly used. See
[the mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) for more information
on how to use mixed precision.
Args:
inner_optimizer: The `tf.keras.optimizers.Optimizer` instance to wrap.
dynamic: Bool indicating whether dynamic loss scaling is used. Defaults to
True. If True, the loss scale will be dynamically updated over time using
an algorithm that keeps the loss scale at approximately its optimal value.
If False, a single fixed loss scale is used and `initial_scale` must be
specified, which is used as the loss scale. Recommended to keep as True,
as choosing a fixed loss scale can be tricky. Currently, there is a small
performance overhead to dynamic loss scaling compared to fixed loss
scaling.
initial_scale: The initial loss scale. If `dynamic` is True, this defaults
to `2 ** 15`. If `dynamic` is False, this must be specified and acts as
the sole loss scale, as the loss scale does not change over time. When
dynamic loss scaling is used, it is better for this to be a very high number,
because a loss scale that is too high gets lowered far more quickly than a
loss scale that is too low gets raised.
dynamic_growth_steps: With dynamic loss scaling, every
`dynamic_growth_steps` steps with finite gradients, the loss scale is
doubled. Defaults to 2000. If a nonfinite gradient is encountered, the
count is reset back to zero, gradients are skipped that step, and the loss
scale is halved. The count can be queried with
`LossScaleOptimizer.dynamic_counter`. This argument can only be specified
if `dynamic` is True.
`LossScaleOptimizer` will occasionally skip applying gradients to the
variables, in which case the trainable variables will not change that step.
This is done because the dynamic loss scale will sometimes be raised too
high, causing overflow in the gradients. Typically, the first 2 to 15 steps of
the model are skipped as the initial loss scale is very high, but afterwards
steps will only be skipped on average 0.05% of the time (the fraction of steps
skipped is `1 / dynamic_growth_steps`).
`LossScaleOptimizer` delegates all public `Optimizer` methods to the inner
optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales
the loss and unscales the gradients. In methods `minimize` and
`apply_gradients`, it additionally updates the loss scale and skips applying
gradients if any gradient has a nonfinite value.
### Hyperparameters
Hyperparameters can be accessed and set on the LossScaleOptimizer, which will
be delegated to the wrapped optimizer.
>>> opt = tf.keras.optimizers.Adam(beta_1=0.8, epsilon=1e-5)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> opt.beta_1 # Equivalent to `opt.inner_optimizer.beta_1`
0.8
>>> opt.beta_1 = 0.7 # Equivalent to `opt.inner_optimizer.beta_1 = 0.7`
>>> opt.beta_1
0.7
>>> opt.inner_optimizer.beta_1
0.7
However, accessing or setting non-hyperparameters is not delegated to the
LossScaleOptimizer. In an Adam optimizer, `beta_1` is a hyperparameter but
`epsilon` is not, as the Adam optimizer only calls `Optimizer._set_hyper` on
`beta_1`.
>>> opt.inner_optimizer.epsilon
1e-5
>>> opt.epsilon
Traceback (most recent call last):
...
AttributeError: 'LossScaleOptimizer' object has no attribute 'epsilon'
>>> opt.epsilon = 1e-4 # This does NOT set epsilon on `opt.inner_optimizer`
>>> opt.inner_optimizer.epsilon
1e-5
In the above example, despite epsilon being set on the LossScaleOptimizer, the
old epsilon value will still be used when training as epsilon was not set on
the inner optimizer.
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self, inner_optimizer, dynamic=True, initial_scale=None,
dynamic_growth_steps=None):
if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
raise TypeError('"inner_optimizer" must be an instance of OptimizerV2, '
'but got: %s' % inner_optimizer)
if not isinstance(dynamic, bool):
# Catch errors if a user incorrectly passes a string or float to the
# second argument, as this is commonly done for
# LossScaleOptimizerV1.
raise TypeError('"dynamic" argument to LossScaleOptimizer.__init__ must '
'be a bool, but got: %r' % (dynamic,))
self._raise_if_strategy_unsupported()
self._optimizer = inner_optimizer
# We don't call super().__init__, since we do not want to call OptimizerV2's
# constructor.
_DelegatingTrackableMixin.__init__(self, self._optimizer)
if dynamic:
if initial_scale is None:
initial_scale = _DEFAULT_INITIAL_SCALE
if dynamic_growth_steps is None:
dynamic_growth_steps = _DEFAULT_GROWTH_STEPS
self._loss_scale = _DynamicLossScaleState(
initial_scale, dynamic_growth_steps, multiplier=2)
self._track_trackable(self._loss_scale, 'loss_scale')
else:
if initial_scale is None:
raise ValueError('"initial_scale" must be specified if "dynamic" is '
'False')
self._loss_scale = float(initial_scale)
if dynamic_growth_steps is not None:
raise ValueError('"dynamic_growth_steps" must be None if "dynamic" '
'is False, but got: %s' % (dynamic_growth_steps,))
# To support restoring TensorFlow 2.2 checkpoints.
self._track_trackable(FakeOptimizerForRestoration(self._optimizer),
'base_optimizer')
@property
def dynamic(self):
"""Bool indicating whether dynamic loss scaling is used."""
return isinstance(self._loss_scale, _DynamicLossScaleState)
@property
def loss_scale(self):
"""The current loss scale as a float32 scalar tensor."""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return ops.convert_to_tensor_v2_with_dispatch(
self._loss_scale.current_loss_scale)
else:
return ops.convert_to_tensor_v2_with_dispatch(self._loss_scale)
@property
def dynamic_counter(self):
"""The number of steps since the loss scale was last increased or decreased.
This is None if `LossScaleOptimizer.dynamic` is False.
The counter is incremented every step. Once it reaches
`LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled
and the counter will be reset back to zero. If nonfinite gradients are
encountered, the loss scale will be halved and the counter will be reset
back to zero.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.counter
else:
return None
@property
def initial_scale(self):
"""The initial loss scale.
If `LossScaleOptimizer.dynamic` is False, this is the same number as
`LossScaleOptimizer.loss_scale`, as the loss scale never changes.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.initial_loss_scale
else:
return self._loss_scale
@property
def dynamic_growth_steps(self):
"""The number of steps it takes to increase the loss scale.
This is None if `LossScaleOptimizer.dynamic` is False.
Every `dynamic_growth_steps` consecutive steps with finite gradients, the
loss scale is increased.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.growth_steps
else:
return None
@property
def inner_optimizer(self):
"""The optimizer that this LossScaleOptimizer is wrapping."""
return self._optimizer
def get_scaled_loss(self, loss):
"""Scales the loss by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to scale the loss before
passing the loss to `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_unscaled_gradients` should also be called.
See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
an example.
Args:
loss: The loss, which will be multiplied by the loss scale. Can either be
a tensor or a callable returning a tensor.
Returns:
`loss` multiplied by `LossScaleOptimizer.loss_scale`.
"""
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)
return new_loss
else:
return loss * math_ops.cast(self.loss_scale, loss.dtype)
def get_unscaled_gradients(self, grads):
"""Unscales the gradients by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to unscale the gradients
after computing them with `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_scaled_loss` should also be called. See
the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an
example.
Args:
grads: A list of tensors, each which will be divided by the loss scale.
Can have None values, which are ignored.
Returns:
A new list the same size as `grads`, where every non-None value in `grads`
is divided by `LossScaleOptimizer.loss_scale`.
"""
loss_scale_reciprocal = 1. / self.loss_scale
return [
_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None
for g in grads
]
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
tape = backprop.GradientTape() if tape is None else tape
with tape:
loss = self.get_scaled_loss(loss)
grads_and_vars = self._optimizer._compute_gradients( # pylint: disable=protected-access
loss,
var_list,
grad_loss,
tape=tape)
grads = [g for g, _ in grads_and_vars]
weights = [v for _, v in grads_and_vars]
unscaled_grads = self.get_unscaled_gradients(grads)
return list(zip(unscaled_grads, weights))
def get_gradients(self, loss, params):
loss = self.get_scaled_loss(loss)
grads = self._optimizer.get_gradients(loss, params)
return self.get_unscaled_gradients(grads)
def _create_all_weights(self, var_list):
self._optimizer._create_all_weights(var_list) # pylint: disable=protected-access
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
if distribution_strategy_context.in_cross_replica_context():
raise ValueError('apply_gradients() must be called in a replica context.')
# We check for the strategy here despite already checking in the constructor
# as frequently the optimizer is created outside the strategy's scope.
self._raise_if_strategy_unsupported()
grads_and_vars = tuple(grads_and_vars)
return distribution_strategy_context.get_replica_context().merge_call(
self._apply_gradients_cross_replica,
args=(grads_and_vars, name, experimental_aggregate_gradients))
def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name,
experimental_aggregate_gradients):
grads = [g for g, _ in grads_and_vars]
if isinstance(self._loss_scale, _DynamicLossScaleState):
loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads)
else:
loss_scale_update_op = control_flow_ops.no_op()
should_apply_grads = True
def apply_fn():
# We do not want DistributionStrategy to unwrap any MirroredVariables in
# grads_and_vars, because even in a replica context, the wrapped optimizer
# expects mirrored variables. So we wrap the variables with an
# _UnwrapPreventer, preventing DistributionStrategy from unwrapping the
# MirroredVariables.
wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
return distribution.extended.call_for_each_replica(
self._apply_gradients,
args=(grads, wrapped_vars, name, experimental_aggregate_gradients))
def do_not_apply_fn():
# Normally self._optimizer.iterations is incremented in
# self._optimizer.apply_gradients(). Since that is not called in this
# branch, we increment it here instead.
return self._optimizer.iterations.assign_add(1, read_value=False)
# Note: We must call this cond() in a cross-replica context.
# DistributionStrategy does not support having a cond in a replica context
# with a branch that calls `merge_call`, and self._optimizer.apply_gradients
# calls `merge_call`.
maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn,
do_not_apply_fn)
return control_flow_ops.group(maybe_apply_op, loss_scale_update_op)
def _apply_gradients(self, grads, wrapped_vars, name,
experimental_aggregate_gradients):
# TODO(reedwm): This will raise a fairly cryptic error message if
# self._optimizer.apply_gradients does not take
# experimental_aggregate_gradients.
return self._optimizer.apply_gradients(
list(zip(grads, wrapped_vars.value)), name,
experimental_aggregate_gradients=experimental_aggregate_gradients)
def get_config(self):
serialized_optimizer = optimizers.serialize(self._optimizer)
return {
'inner_optimizer': serialized_optimizer,
'dynamic': self.dynamic,
'initial_scale': self.initial_scale,
'dynamic_growth_steps': self.dynamic_growth_steps,
}
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
if 'loss_scale' in config:
# If loss_scale is in config, we assume we are deserializing a
# LossScaleOptimizer from TF 2.3 or below. We convert the config so it
# can be deserialized in the current LossScaleOptimizer.
loss_scale = keras_loss_scale_module.deserialize(
config.pop('loss_scale'))
if isinstance(loss_scale, loss_scale_module.FixedLossScale):
config['dynamic'] = False
config['initial_scale'] = loss_scale._loss_scale_value # pylint: disable=protected-access
elif isinstance(loss_scale, loss_scale_module.DynamicLossScale):
config['dynamic'] = True
config['initial_scale'] = loss_scale.initial_loss_scale
config['dynamic_growth_steps'] = loss_scale.increment_period
if loss_scale.multiplier != 2:
raise ValueError('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: %s' % (loss_scale,))
else:
raise ValueError(
'Serialized LossScaleOptimizers with a LossScale that is neither a '
'FixedLossScale nor a DynamicLossScale can no longer be '
'deserialized')
config['inner_optimizer'] = config.pop('optimizer')
config['inner_optimizer'] = optimizers.deserialize(
config['inner_optimizer'], custom_objects=custom_objects)
return cls(**config)
def _raise_if_strategy_unsupported(self):
if not strategy_supports_loss_scaling():
strategy = distribution_strategy_context.get_strategy()
if isinstance(strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1,
tpu_strategy.TPUStrategyV2)):
raise ValueError(
'Loss scaling is not supported with TPUStrategy. Loss scaling is '
'unnecessary with TPUs, since they support bfloat16 instead of '
'float16 and bfloat16 does not require loss scaling. You should '
'remove the use of the LossScaleOptimizer when TPUs are used.')
else:
raise ValueError('Loss scaling is not supported with the '
'tf.distribute.Strategy: %s. Try using a different '
'Strategy, e.g. a MirroredStrategy' %
strategy.__class__.__name__)
# Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer
# below.
@property
def iterations(self):
return self._optimizer.iterations
@iterations.setter
def iterations(self, variable):
self._optimizer.iterations = variable
def get_slot_names(self):
return self._optimizer.get_slot_names()
def variables(self):
return self._optimizer.variables()
@property
def weights(self):
return self._optimizer.weights
def get_weights(self):
return self._optimizer.get_weights()
def set_weights(self, weights):
return self._optimizer.set_weights(weights)
@property
def clipnorm(self):
return self._optimizer.clipnorm
@clipnorm.setter
def clipnorm(self, val):
self._optimizer.clipnorm = val
@property
def global_clipnorm(self):
return self._optimizer.global_clipnorm
@global_clipnorm.setter
def global_clipnorm(self, val):
self._optimizer.global_clipnorm = val
@property
def clipvalue(self):
return self._optimizer.clipvalue
@clipvalue.setter
def clipvalue(self, val):
self._optimizer.clipvalue = val
def _aggregate_gradients(self, grads_and_vars):
return self._optimizer._aggregate_gradients(grads_and_vars) # pylint: disable=protected-access
def _restore_slot_variable(self, slot_name, variable, slot_variable):
return self._optimizer._restore_slot_variable(slot_name, variable, # pylint: disable=protected-access
slot_variable)
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position, slot_name, variable)
def get_slot(self, var, slot_name):
return self._optimizer.get_slot(var, slot_name)
def add_slot(self, var, slot_name, initializer='zeros'):
return self._optimizer.add_slot(var, slot_name, initializer)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError as e:
if name == '_optimizer' or name == '_hyper':
# Avoid infinite recursion
raise e
# Delegate hyperparameter accesses to inner optimizer.
if name == 'lr':
name = 'learning_rate'
if name in self._optimizer._hyper:
return self._optimizer._get_hyper(name)
raise e
def __dir__(self):
result = set(super(LossScaleOptimizer, self).__dir__())
if '_optimizer' in result:
result |= self._optimizer._hyper.keys()
if 'learning_rate' in self._optimizer._hyper.keys():
result.add('lr')
return list(result)
def __setattr__(self, name, value):
if name == 'lr':
name = 'learning_rate'
# Delegate setting hyperparameter to inner optimizer if the attribute does
# not exist on the LossScaleOptimizer
try:
# We cannot check for the 'iterations' attribute as it cannot be set after
# it is accessed.
if name != 'iterations':
object.__getattribute__(self, name)
has_attribute = True
except AttributeError:
has_attribute = False
if (name != '_optimizer' and name in self._optimizer._hyper
and not has_attribute):
self._optimizer._set_hyper(name, value)
else:
super(LossScaleOptimizer, self).__setattr__(name, value)
# We do not override some OptimizerV2 methods. For each, we describe why we do
# not delegate them to self._optimizer:
# * get_updates: get_updates() calls get_gradients(). Since we override
# get_gradients(), we cannot delegate get_updates() to self._optimizer,
# otherwise the overridden get_gradients() method would not be called.
# Luckily, get_updates() does not access any OptimizerV2 fields, so
# inheriting the OptimizerV2 version works fine.
# * minimize: We don't delegate for a similar as get_updates(): it calls
# both self._compute_gradients() and self.apply_gradients(), and both need
# to have the LossScaleOptimizer version called.
# TODO(reedwm): Maybe throw an error if mixed precision is used without this
# optimizer being used.
@keras_export('keras.mixed_precision.experimental.LossScaleOptimizer')
class LossScaleOptimizerV1(LossScaleOptimizer):
"""An deprecated optimizer that applies loss scaling.
Warning: This class is deprecated and will be removed in TensorFlow 2.5.
Please use the non-experimental class
`tf.keras.mixed_precision.LossScaleOptimizer` instead.
This class is identical to the non-experimental
`keras.mixed_precision.LossScaleOptimizer` except its constructor takes
different arguments. For this class (the experimental version), the
constructor takes a `loss_scale` argument. For the non-experimental class,
the constructor encodes the loss scaling information in multiple arguments.
Note that unlike this class, the non-experimental class does not accept a
`tf.compat.v1.mixed_precision.LossScale`, which is deprecated.
If you currently use this class, you should switch to the non-experimental
`tf.keras.mixed_precision.LossScaleOptimizer` instead. We show several
examples of converting the use of the experimental class to the equivalent
non-experimental class.
>>> # In all of the the examples below, `opt1` and `opt2` are identical
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale='dynamic')
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD())
>>> assert opt1.get_config() == opt2.get_config()
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale=123)
>>> # dynamic=False indicates to use fixed loss scaling. initial_scale=123
>>> # refers to the initial loss scale, which is the single fixed loss scale
>>> # when dynamic=False.
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), dynamic=False, initial_scale=123)
>>> assert opt1.get_config() == opt2.get_config()
>>> loss_scale = tf.compat.v1.mixed_precision.experimental.DynamicLossScale(
... initial_loss_scale=2048, increment_period=500)
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale=loss_scale)
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), initial_scale=2048,
... dynamic_growth_steps=500)
>>> assert opt1.get_config() == opt2.get_config()
Make sure to also switch from this class to the non-experimental class in
isinstance checks, if you have any. If you do not do this, your model may run
into hard-to-debug issues, as the experimental `LossScaleOptimizer` subclasses
the non-experimental `LossScaleOptimizer`, but not vice versa. It is safe to
switch isinstance checks to the non-experimental `LossScaleOptimizer` even
before using the non-experimental `LossScaleOptimizer`.
>>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
... tf.keras.optimizers.SGD(), loss_scale='dynamic')
>>> # The experimental class subclasses the non-experimental class
>>> isinstance(opt1, tf.keras.mixed_precision.LossScaleOptimizer)
True
>>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(
... tf.keras.optimizers.SGD())
>>> # The non-experimental class does NOT subclass the experimental class.
>>> isinstance(opt2, tf.keras.mixed_precision.experimental.LossScaleOptimizer)
False
Args:
optimizer: The Optimizer instance to wrap.
loss_scale: The loss scale to scale the loss and gradients. This can
either be an int/float to use a fixed loss scale, the string "dynamic"
to use dynamic loss scaling, or an instance of a LossScale. The string
"dynamic" equivalent to passing `DynamicLossScale()`, and passing an
int/float is equivalent to passing a FixedLossScale with the given loss
scale. If a DynamicLossScale is passed, DynamicLossScale.multiplier must
be 2 (the default).
"""
def __init__(self, optimizer, loss_scale):
warn_msg_prefix = (
'tf.keras.mixed_precision.experimental.LossScaleOptimizer is '
'deprecated. Please use tf.keras.mixed_precision.LossScaleOptimizer '
'instead. ')
if isinstance(loss_scale, dict):
loss_scale = keras_loss_scale_module.deserialize(loss_scale)
if isinstance(loss_scale, (int, float)):
tf_logging.warn(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt, dynamic=False, initial_scale={})'.format(loss_scale))
super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,
initial_scale=loss_scale)
elif isinstance(loss_scale, loss_scale_module.FixedLossScale):
ls_val = loss_scale._loss_scale_value # pylint: disable=protected-access
tf_logging.warn(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt, dynamic=False, initial_scale={})'.format(ls_val))
super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,
initial_scale=ls_val)
elif loss_scale == 'dynamic':
tf_logging.warn(
warn_msg_prefix + 'For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt)')
super(LossScaleOptimizerV1, self).__init__(optimizer)
elif isinstance(loss_scale, loss_scale_module.DynamicLossScale):
kwargs = {}
extra_arguments = ''
if loss_scale.initial_loss_scale != _DEFAULT_INITIAL_SCALE:
kwargs['initial_scale'] = loss_scale.initial_loss_scale
extra_arguments += (', initial_scale=%s' %
loss_scale.initial_loss_scale)
if loss_scale.increment_period != _DEFAULT_GROWTH_STEPS:
kwargs['dynamic_growth_steps'] = loss_scale.increment_period
extra_arguments += (', dynamic_growth_steps=%s' %
loss_scale.increment_period)
if loss_scale.multiplier != 2:
raise ValueError('When passing a DynamicLossScale to "loss_scale", '
'DynamicLossScale.multiplier must be 2. Got: %s'
% (loss_scale,))
tf_logging.warn(
warn_msg_prefix +
'Note that the non-experimental LossScaleOptimizer does not take a '
'DynamicLossScale but instead takes the dynamic configuration '
'directly in the constructor. For example:\n'
' opt = tf.keras.mixed_precision.LossScaleOptimizer('
'opt{})\n'.format(extra_arguments))
super(LossScaleOptimizerV1, self).__init__(optimizer, **kwargs)
elif isinstance(loss_scale, loss_scale_module.LossScale):
raise TypeError('Passing a LossScale that is not a FixedLossScale or a '
'DynamicLossScale is no longer supported. Got: {}'
.format(loss_scale))
else:
raise ValueError('Invalid value passed to loss_scale. loss_scale '
'must be the string "dynamic" (recommended), an int, '
'a float, a FixedLossScale, or a DynamicLossScale. Got '
'value: {}'.format(loss_scale))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
# If loss_scale is in config, we assume we are deserializing a
# LossScaleOptimizer from TF 2.3 or below. Otherwise, we assume we are
# deserializing a LossScaleOptimizer from TF 2.4 or above.
if 'loss_scale' in config:
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'])
if (isinstance(config['loss_scale'], loss_scale_module.DynamicLossScale)
and config['loss_scale'].multiplier != 2):
raise ValueError('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: %s' % (config['loss_scale'],))
config['optimizer'] = optimizers.deserialize(
config['optimizer'], custom_objects=custom_objects)
return cls(**config)
# We convert the config, as generated by LossScaleOptimizer.get_config, to a
# version that can be passed to LossScaleOptimizerV1.__init__
if config['dynamic']:
config['loss_scale'] = loss_scale_module.DynamicLossScale(
config['initial_scale'], config['dynamic_growth_steps'], multiplier=2)
else:
config['loss_scale'] = loss_scale_module.FixedLossScale(
config['initial_scale'])
del config['dynamic']
del config['initial_scale']
del config['dynamic_growth_steps']
config['optimizer'] = optimizers.deserialize(
config.pop('inner_optimizer'), custom_objects=custom_objects)
return cls(**config)
class FakeOptimizerForRestoration(trackable.Trackable):
"""A fake optimizer used to support restoring TensorFlow 2.2 checkpoints.
The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class
exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow.
In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the
following in LossScaleOptimizer.__init__
```
self._track_trackable(self._optimizer, 'base_optimizer')
```
This means a dependency from the LossScaleOptimizer to the wrapped optimizer
would be stored in the checkpoint. However now, the checkpoint format with a
LossScaleOptimizer is the same as the format without a LossScaleOptimizer,
except the loss scale is also stored. This means there is no dependency from
the LossScaleOptimizer to the wrapped optimizer. Instead, the
LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's
perspective, by overriding all Trackable methods and delegating them to the
wrapped optimizer.
To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency
on this class instead of the inner optimizer. When restored, this class will
instead restore the slot variables of the inner optimizer. Since this class
has no variables, it does not affect the checkpoint when saved.
"""
def __init__(self, optimizer):
self._optimizer = optimizer
def get_slot_names(self):
return self._optimizer.get_slot_names()
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position, slot_name, variable)
# pylint: disable=protected-access
mixed_precision._register_wrapper_optimizer_cls(optimizer_v2.OptimizerV2,
LossScaleOptimizerV1)
def _multiply_gradient(gradient, scale):
"""Multiply a (possibly sparse) gradient by the given scale factor."""
scale = math_ops.cast(scale, gradient.dtype)
if isinstance(gradient, ops.IndexedSlices):
return ops.IndexedSlices(
gradient.values * scale,
gradient.indices,
dense_shape=gradient.dense_shape)
else:
return gradient * scale
def strategy_supports_loss_scaling():
"""Returns True if the current Strategy supports loss scaling."""
if not distribution_strategy_context.has_strategy():
return True
strategy = distribution_strategy_context.get_strategy()
# Strategies are supported if either there is only one replica or if variables
# are replicated per device. Otherwise, the current model.fit() implementation
# and most custom training loops incorrectly unscale the gradients. Currently,
# gradients are unscaled once per compute replica, but they should be unscaled
# once per variable replica. When there is one variable replica for each
# compute replica, this works fine, but otherwise issues will occur.
# TODO(reedwm): Support all strategies.
return isinstance(strategy, (
collective_all_reduce_strategy.CollectiveAllReduceStrategy,
collective_all_reduce_strategy.CollectiveAllReduceStrategyV1,
one_device_strategy.OneDeviceStrategy,
one_device_strategy.OneDeviceStrategyV1,
mirrored_strategy.MirroredStrategy,
mirrored_strategy.MirroredStrategyV1,
))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
import tensorflow.compat.v2 as tf
import copy
from absl.testing import parameterized
import numpy as np
import keras
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
@test_combinations.generate(test_combinations.keras_mode_combinations())
class SimpleRNNLayerTest(tf.test.TestCase, parameterized.TestCase):
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@test_utils.run_v2_only
def test_float64_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('rmsprop', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
for mode in [0, 1, 2]:
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_deep_copy_SimpleRNN(self):
cell = keras.layers.SimpleRNNCell(5)
copied_cell = copy.deepcopy(cell)
self.assertEqual(copied_cell.units, 5)
self.assertEqual(cell.get_config(), copied_cell.get_config())
def test_regularizers_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertLen(layer.losses, 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertLen(layer.losses, 4)
else:
self.assertLen(layer.get_losses_for(x), 1)
def test_statefulness_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.SimpleRNN
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=test_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_get_initial_states(self):
batch_size = 4
cell = keras.layers.SimpleRNNCell(20)
initial_state = cell.get_initial_state(
batch_size=batch_size, dtype=tf.float32)
_, state = cell(np.ones((batch_size, 20), dtype=np.float32), initial_state)
self.assertEqual(state.shape, initial_state.shape)
if __name__ == '__main__':
tf.test.main()
|
|
import file_writer as fw
import flux_simulator as fs
import quantifiers as qs
import os.path
import parameters
import piquant_options as po
RUN_SCRIPT = "run_quantification.sh"
TRANSCRIPT_COUNTS_SCRIPT = "count_transcripts_for_genes.py"
UNIQUE_SEQUENCE_SCRIPT = "calculate_unique_transcript_sequence.py"
ASSEMBLE_DATA_SCRIPT = "assemble_quantification_data.py"
ANALYSE_DATA_SCRIPT = "analyse_quantification_run.py"
RUN_PREQUANTIFICATION_VARIABLE = "RUN_PREQUANTIFICATION"
QUANTIFY_TRANSCRIPTS_VARIABLE = "QUANTIFY_TRANSCRIPTS"
ANALYSE_RESULTS_VARIABLE = "ANALYSE_RESULTS"
TPMS_FILE = "tpms.csv"
TRANSCRIPT_COUNTS_FILE = "transcript_counts.csv"
UNIQUE_SEQUENCE_FILE = "unique_sequence.csv"
def _get_script_path(script_name):
return os.path.join(
os.path.abspath(os.path.dirname(__file__)), script_name)
def _get_transcript_counts_file(quantifier_dir):
return os.path.join(quantifier_dir, TRANSCRIPT_COUNTS_FILE)
def _get_unique_sequence_file(quantifier_dir):
return os.path.join(quantifier_dir, UNIQUE_SEQUENCE_FILE)
def _add_run_prequantification(
writer, quant_method, quant_params,
quantifier_dir, transcript_gtf_file):
with writer.if_block("-n \"$RUN_PREQUANTIFICATION\""):
# Perform preparatory tasks required by a particular quantification
# method prior to calculating abundances; for example, this might
# include mapping reads to the genome with TopHat
quant_method.write_preparatory_commands(writer, quant_params)
with writer.section():
_add_calculate_transcripts_per_gene(
writer, quantifier_dir, transcript_gtf_file)
with writer.section():
_add_calculate_unique_sequence_length(
writer, quantifier_dir, transcript_gtf_file)
def _add_quantify_transcripts(writer, quant_method, quant_params, cleanup):
# Use the specified quantification method to calculate per-transcript TPMs
with writer.if_block("-n \"$QUANTIFY_TRANSCRIPTS\""):
with writer.section():
writer.add_comment(
"Use {method} to calculate per-transcript TPMs.".format(
method=quant_method))
quant_method.write_quantification_commands(writer, quant_params)
if cleanup:
writer.add_comment(
"Remove files not necessary for analysis of quantification.")
quant_method.write_post_quantification_cleanup(writer)
def _add_calculate_transcripts_per_gene(
writer, quantifier_dir, transcript_gtf_file):
# Calculate the number of transcripts per gene and write to a file
writer.add_comment("Calculate the number of transcripts per gene.")
counts_file = _get_transcript_counts_file(quantifier_dir)
with writer.if_block("! -f " + counts_file):
writer.add_line("{command} {transcript_gtf} > {counts_file}".format(
command=_get_script_path(TRANSCRIPT_COUNTS_SCRIPT),
transcript_gtf=transcript_gtf_file,
counts_file=counts_file))
def _add_calculate_unique_sequence_length(
writer, quantifier_dir, transcript_gtf_file):
# Calculate the length of unique sequence per transcript and write to a
# file.
writer.add_comment(
"Calculate the length of unique sequence per transcript.")
unique_seq_file = _get_unique_sequence_file(quantifier_dir)
with writer.if_block("! -f " + unique_seq_file):
writer.add_line(
"{command} {transcript_gtf} > {unique_seq_file}".format(
command=_get_script_path(UNIQUE_SEQUENCE_SCRIPT),
transcript_gtf=transcript_gtf_file,
unique_seq_file=unique_seq_file))
def _add_assemble_quantification_data(
writer, quantifier_dir, fs_pro_file, quant_method):
# Now assemble data required for analysis of quantification performance
# into one file
writer.add_comment(
"Assemble data required for analysis of quantification performance " +
"into one file")
writer.add_line(
("{command} --method={method} --out={out_file} {fs_pro_file} " +
"{counts_file} {unique_seq_file}").format(
command=_get_script_path(ASSEMBLE_DATA_SCRIPT),
method=quant_method,
out_file=TPMS_FILE,
fs_pro_file=fs_pro_file,
counts_file=_get_transcript_counts_file(quantifier_dir),
unique_seq_file=_get_unique_sequence_file(quantifier_dir)))
def _add_analyse_quantification_results(
writer, run_dir, piquant_options, **params):
# Finally perform analysis on the calculated TPMs
writer.add_comment("Perform analysis on calculated TPMs.")
options_dict = {p.name: p.option_name for
p in parameters.get_run_parameters()}
params_spec = ""
for param_name, param_val in params.items():
params_spec += "{name}={val} ".format(
name=options_dict[param_name],
val=str(param_val))
writer.add_line(
("{command} --plot-format={format} " +
"--grouped-threshold={gp_threshold} {params_spec} " +
"{tpms_file} {output_basename}").format(
command=_get_script_path(ANALYSE_DATA_SCRIPT),
format=piquant_options[po.PLOT_FORMAT],
gp_threshold=piquant_options[po.GROUPED_THRESHOLD],
params_spec=params_spec,
tpms_file=TPMS_FILE,
output_basename=os.path.basename(run_dir)))
def _add_process_command_line_options(writer):
# Process command line options - these allow us to subsequently re-run just
# part of the analysis
writer.add_comment("Process command line options.")
with writer.section():
writer.set_variable(RUN_PREQUANTIFICATION_VARIABLE, "")
writer.set_variable(QUANTIFY_TRANSCRIPTS_VARIABLE, "")
writer.set_variable(ANALYSE_RESULTS_VARIABLE, "")
with writer.while_block("getopts \":pqa\" opt"):
with writer.case_block("$opt"):
with writer.case_option_block("p"):
writer.set_variable(RUN_PREQUANTIFICATION_VARIABLE, 1)
with writer.case_option_block("q"):
writer.set_variable(QUANTIFY_TRANSCRIPTS_VARIABLE, 1)
with writer.case_option_block("a"):
writer.set_variable(ANALYSE_RESULTS_VARIABLE, 1)
with writer.case_option_block("\?"):
writer.add_line("echo \"Invalid option: -$OPTARG\" >&2")
def _add_analyse_results(
writer, reads_dir, run_dir, quantifier_dir, piquant_options,
quant_method, read_length, read_depth, paired_end, errors, bias):
fs_pro_file = os.path.join(reads_dir, fs.EXPRESSION_PROFILE_FILE)
with writer.if_block("-n \"$ANALYSE_RESULTS\""):
with writer.section():
_add_assemble_quantification_data(
writer, quantifier_dir, fs_pro_file, quant_method)
_add_analyse_quantification_results(
writer, run_dir, piquant_options,
quant_method=quant_method,
read_length=read_length, read_depth=read_depth,
paired_end=paired_end, errors=errors, bias=bias)
def _get_quant_params(reads_dir, quantifier_dir, transcript_gtf,
genome_fasta, paired_end, errors):
quant_params = {
qs.TRANSCRIPT_GTF_FILE: transcript_gtf,
qs.GENOME_FASTA_DIR: genome_fasta,
qs.QUANTIFIER_DIRECTORY: quantifier_dir,
qs.FASTQ_READS: errors
}
if paired_end:
quant_params[qs.LEFT_SIMULATED_READS] = \
os.path.join(reads_dir,
fs.get_reads_file(errors, paired_end=fs.LEFT_READS))
quant_params[qs.RIGHT_SIMULATED_READS] = \
os.path.join(reads_dir,
fs.get_reads_file(errors, paired_end=fs.RIGHT_READS))
else:
quant_params[qs.SIMULATED_READS] = \
os.path.join(reads_dir, fs.get_reads_file(errors))
return quant_params
def write_run_quantification_script(
reads_dir, run_dir, piquant_options,
quant_method=None, read_length=50, read_depth=10,
paired_end=False, errors=False, bias=False,
transcript_gtf=None, genome_fasta=None):
os.mkdir(run_dir)
with fw.writing_to_file(
fw.BashScriptWriter, run_dir, RUN_SCRIPT) as writer:
with writer.section():
_add_process_command_line_options(writer)
quantifier_dir = os.path.join(
piquant_options[po.OUTPUT_DIRECTORY], "quantifier_scratch")
quant_params = _get_quant_params(
reads_dir, quantifier_dir, transcript_gtf,
genome_fasta, paired_end, errors)
with writer.section():
_add_run_prequantification(
writer, quant_method, quant_params,
quantifier_dir, transcript_gtf)
with writer.section():
cleanup = not piquant_options[po.NO_CLEANUP]
_add_quantify_transcripts(
writer, quant_method, quant_params, cleanup)
_add_analyse_results(
writer, reads_dir, run_dir, quantifier_dir, piquant_options,
quant_method, read_length, read_depth, paired_end, errors, bias)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generator script for proxy tests.
See AndroidProxySelectorTest.java
and net/proxy_resolution/proxy_config_service_android_unittest.cc
To generate C++, run this script without arguments.
To generate Java, run this script with -j argument.
Note that this generator is not run as part of the build process because
we are assuming that these test cases will not change often.
"""
import optparse
test_cases = [
{
"name": "NoProxy",
"description" : "Test direct mapping when no proxy defined.",
"properties" : {
},
"mappings" : {
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyHostAndPort",
"description" : "Test http.proxyHost and http.proxyPort works.",
"properties" : {
"http.proxyHost" : "httpproxy.com",
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "PROXY httpproxy.com:8080",
"ftp://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyHostOnly",
"description" : "We should get the default port (80) for proxied hosts.",
"properties" : {
"http.proxyHost" : "httpproxy.com",
},
"mappings" : {
"http://example.com/" : "PROXY httpproxy.com:80",
"ftp://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyPortOnly",
"description" :
"http.proxyPort only should not result in any hosts being proxied.",
"properties" : {
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT"
}
},
{
"name": "HttpNonProxyHosts1",
"description" : "Test that HTTP non proxy hosts are mapped correctly",
"properties" : {
"http.nonProxyHosts" : "slashdot.org",
"http.proxyHost" : "httpproxy.com",
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "PROXY httpproxy.com:8080",
"http://slashdot.org/" : "DIRECT",
}
},
{
"name": "HttpNonProxyHosts2",
"description" : "Test that | pattern works.",
"properties" : {
"http.nonProxyHosts" : "slashdot.org|freecode.net",
"http.proxyHost" : "httpproxy.com",
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "PROXY httpproxy.com:8080",
"http://slashdot.org/" : "DIRECT",
"http://freecode.net/" : "DIRECT",
}
},
{
"name": "HttpNonProxyHosts3",
"description" : "Test that * pattern works.",
"properties" : {
"http.nonProxyHosts" : "*example.com",
"http.proxyHost" : "httpproxy.com",
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "DIRECT",
"http://www.example.com/" : "DIRECT",
"http://slashdot.org/" : "PROXY httpproxy.com:8080",
}
},
{
"name": "FtpNonProxyHosts",
"description" : "Test that FTP non proxy hosts are mapped correctly",
"properties" : {
"ftp.nonProxyHosts" : "slashdot.org",
"ftp.proxyHost" : "httpproxy.com",
"ftp.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "PROXY httpproxy.com:8080",
}
},
{
"name": "FtpProxyHostAndPort",
"description" : "Test ftp.proxyHost and ftp.proxyPort works.",
"properties" : {
"ftp.proxyHost" : "httpproxy.com",
"ftp.proxyPort" : "8080",
},
"mappings" : {
"ftp://example.com/" : "PROXY httpproxy.com:8080",
"http://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT",
}
},
{
"name": "FtpProxyHostOnly",
"description" : "Test ftp.proxyHost and default port.",
"properties" : {
"ftp.proxyHost" : "httpproxy.com",
},
"mappings" : {
"ftp://example.com/" : "PROXY httpproxy.com:80",
"http://example.com/" : "DIRECT",
"https://example.com/" : "DIRECT",
}
},
{
"name": "HttpsProxyHostAndPort",
"description" : "Test https.proxyHost and https.proxyPort works.",
"properties" : {
"https.proxyHost" : "httpproxy.com",
"https.proxyPort" : "8080",
},
"mappings" : {
"https://example.com/" : "PROXY httpproxy.com:8080",
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "DIRECT",
}
},
{
"name": "HttpsProxyHostOnly",
"description" : "Test https.proxyHost and default port.",
# Chromium differs from the Android platform by connecting to port 80 for
# HTTPS connections by default, hence cpp-only.
"cpp-only" : "",
"properties" : {
"https.proxyHost" : "httpproxy.com",
},
"mappings" : {
"https://example.com/" : "PROXY httpproxy.com:80",
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyHostIPv6",
"description" : "Test IPv6 https.proxyHost and default port.",
"cpp-only" : "",
"properties" : {
"http.proxyHost" : "a:b:c::d:1",
},
"mappings" : {
"http://example.com/" : "PROXY [a:b:c::d:1]:80",
"ftp://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyHostAndPortIPv6",
"description" : "Test IPv6 http.proxyHost and http.proxyPort works.",
"cpp-only" : "",
"properties" : {
"http.proxyHost" : "a:b:c::d:1",
"http.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "PROXY [a:b:c::d:1]:8080",
"ftp://example.com/" : "DIRECT",
}
},
{
"name": "HttpProxyHostAndInvalidPort",
"description" : "Test invalid http.proxyPort does not crash.",
"cpp-only" : "",
"properties" : {
"http.proxyHost" : "a:b:c::d:1",
"http.proxyPort" : "65536",
},
"mappings" : {
"http://example.com/" : "DIRECT",
"ftp://example.com/" : "DIRECT",
}
},
{
"name": "DefaultProxyExplictPort",
"description" :
"Default http proxy is used if a scheme-specific one is not found.",
"properties" : {
"proxyHost" : "defaultproxy.com",
"proxyPort" : "8080",
"ftp.proxyHost" : "httpproxy.com",
"ftp.proxyPort" : "8080",
},
"mappings" : {
"http://example.com/" : "PROXY defaultproxy.com:8080",
"https://example.com/" : "PROXY defaultproxy.com:8080",
"ftp://example.com/" : "PROXY httpproxy.com:8080",
}
},
{
"name": "DefaultProxyDefaultPort",
"description" : "Check that the default proxy port is as expected.",
# Chromium differs from the Android platform by connecting to port 80 for
# HTTPS connections by default, hence cpp-only.
"cpp-only" : "",
"properties" : {
"proxyHost" : "defaultproxy.com",
},
"mappings" : {
"http://example.com/" : "PROXY defaultproxy.com:80",
"https://example.com/" : "PROXY defaultproxy.com:80",
}
},
{
"name": "FallbackToSocks",
"description" : "SOCKS proxy is used if scheme-specific one is not found.",
"properties" : {
"http.proxyHost" : "defaultproxy.com",
"socksProxyHost" : "socksproxy.com"
},
"mappings" : {
"http://example.com/" : "PROXY defaultproxy.com:80",
"https://example.com/" : "SOCKS5 socksproxy.com:1080",
"ftp://example.com" : "SOCKS5 socksproxy.com:1080",
}
},
{
"name": "SocksExplicitPort",
"description" : "SOCKS proxy port is used if specified",
"properties" : {
"socksProxyHost" : "socksproxy.com",
"socksProxyPort" : "9000",
},
"mappings" : {
"http://example.com/" : "SOCKS5 socksproxy.com:9000",
}
},
{
"name": "HttpProxySupercedesSocks",
"description" : "SOCKS proxy is ignored if default HTTP proxy defined.",
"properties" : {
"proxyHost" : "defaultproxy.com",
"socksProxyHost" : "socksproxy.com",
"socksProxyPort" : "9000",
},
"mappings" : {
"http://example.com/" : "PROXY defaultproxy.com:80",
}
},
]
class GenerateCPlusPlus:
"""Generate C++ test cases"""
def Generate(self):
for test_case in test_cases:
print ("TEST_F(ProxyConfigServiceAndroidTest, %s) {" % test_case["name"])
if "description" in test_case:
self._GenerateDescription(test_case["description"]);
self._GenerateConfiguration(test_case["properties"])
self._GenerateMappings(test_case["mappings"])
print "}"
print ""
def _GenerateDescription(self, description):
print " // %s" % description
def _GenerateConfiguration(self, properties):
for key in sorted(properties.iterkeys()):
print " AddProperty(\"%s\", \"%s\");" % (key, properties[key])
print " ProxySettingsChanged();"
def _GenerateMappings(self, mappings):
for url in sorted(mappings.iterkeys()):
print " TestMapping(\"%s\", \"%s\");" % (url, mappings[url])
class GenerateJava:
"""Generate Java test cases"""
def Generate(self):
for test_case in test_cases:
if "cpp-only" in test_case:
continue
if "description" in test_case:
self._GenerateDescription(test_case["description"]);
print " @SmallTest"
print " @Feature({\"AndroidWebView\"})"
print " public void test%s() throws Exception {" % test_case["name"]
self._GenerateConfiguration(test_case["properties"])
self._GenerateMappings(test_case["mappings"])
print " }"
print ""
def _GenerateDescription(self, description):
print " /**"
print " * %s" % description
print " *"
print " * @throws Exception"
print " */"
def _GenerateConfiguration(self, properties):
for key in sorted(properties.iterkeys()):
print " System.setProperty(\"%s\", \"%s\");" % (
key, properties[key])
def _GenerateMappings(self, mappings):
for url in sorted(mappings.iterkeys()):
mapping = mappings[url]
if 'HTTPS' in mapping:
mapping = mapping.replace('HTTPS', 'PROXY')
print " checkMapping(\"%s\", \"%s\");" % (url, mapping)
def main():
parser = optparse.OptionParser()
parser.add_option("-j", "--java",
action="store_true", dest="java");
(options, args) = parser.parse_args();
if options.java:
generator = GenerateJava()
else:
generator = GenerateCPlusPlus()
generator.Generate()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import os
import csv
import numpy as np
import dirfiles
from confparser import load_config
import expyriment
from expyriment import design, control, stimuli, io, misc
def launch_protocol(protocol_ini, exp, gender, vs):
# %%
# ======================== LOAD CONFIG.INI FILE ===========================
# Select .ini file for instructions
setting = load_config(protocol_ini)
# %%
# ========================== LOAD INPUT FILES =============================
# Define the pathway of the inputs directory
inputs_path = os.path.abspath(setting["inputs_dir"] + gender +
'/version_' + vs)
print inputs_path
# List input csv files
inputs_filenames = dirfiles.listdir_csvnohidden(inputs_path)
inputs_filenames.sort()
# %%
# ======== WAITS FOR USER TO ENTER BLOCK (AKA RUN) NUMBER TO START ========
# Define number of runs
nb_block = len(inputs_filenames)
# Wait 5 seconds in order to launch input text screen
exp.keyboard.wait(duration=5000)
# Create text input box
ti = io.TextInput(message='Block number:', message_text_size=24,
message_colour=map(int, setting["bcolor"]),
user_text_colour=map(int, setting["ucolor"]),
ascii_filter=misc.constants.K_ALL_DIGITS,
frame_colour=(70, 70, 70))
# Load user's input
while True:
sb = ti.get('0')
# If string is empty
if not sb:
warning_message1 = stimuli.TextLine(setting["wm1"].decode('utf-8'),
text_size=24,
text_colour=(204, 0, 0))
warning_message1.present()
exp.keyboard.wait(misc.constants.K_RETURN, duration=5000)
continue
# If block number introduced is higher than the number of blocks
# preset in config file
elif int(sb) >= nb_block:
warning_message2 = stimuli.TextLine(setting["wm2"].decode('utf-8'),
text_size=24,
text_colour=(204, 0, 0))
warning_message2.present()
exp.keyboard.wait(misc.constants.K_RETURN, duration=5000)
continue
else:
start_block = int(sb)
break
# %%
# ============================== DESIGN ===================================
# Stimuli sequence of the protocol
session_list = [[i for i in csv.reader(open(inputs_filename))]
for inputs_filename in inputs_filenames]
# Define the blocks using expyriment module
block_list = [expyriment.design.Block(name="block%d" % bs)
for bs in np.arange(nb_block)]
# For all blocks in the block list...
for bl in np.arange(nb_block):
# ...add stimuli to the trials and add trials to the blocks
for l,line in enumerate(session_list[bl]):
# Create a trial
trial = design.Trial()
# Retrieve variables from input files at every trial and
# label them according to what is defined by var_names
for tsf in np.arange(len(setting["var_names"]) - 1):
trial.set_factor(setting["var_names"][tsf],
line[tsf].decode('utf-8'))
trial.set_factor(setting["var_names"][-1],
line[-2].decode('utf-8'))
# Create stimuli...
if line[1] == '0':
# ... (1) for Rest trial,
# (i.e. between encoding and recognition), ...
if line[0] == '+':
fixcross_isi = stimuli.FixCross(size=(30, 30),
line_width=3,
colour=(255, 255, 255))
# Add fixation cross to the trial
trial.add_stimulus(fixcross_isi)
# (2) for Instructions trial, ...
else:
instruction = stimuli.TextLine(line[4].decode('utf-8'),
position=(0, 250),
text_size=56,
text_colour=(255, 153, 51))
question = stimuli.TextLine(line[0].decode('utf-8'),
position=(0, 0),
text_size=58,)
question_reminder = stimuli.TextLine(
line[0].decode('utf-8'), position=(0, 250),
text_size=56, text_colour=(255, 153, 51))
# Add instructions to the trial
trial.add_stimulus(instruction)
trial.add_stimulus(question)
# ... and (3) for active trial.
else:
# Add adjectives to the trial
adjective = stimuli.TextLine(line[0].decode('utf-8'),
text_size=58,
position=(0, 0))
yes_answer = stimuli.TextLine(setting["yes_key_indication"],
position=(-350, -250),
text_size=60)
no_answer = stimuli.TextLine(setting["no_key_indication"],
position=(300, -250),
text_size=60)
trial.add_stimulus(question_reminder)
trial.add_stimulus(adjective)
trial.add_stimulus(yes_answer)
trial.add_stimulus(no_answer)
# Add trial to run
block_list[bl].add_trial(trial)
# Add block to the experiment
for ad in np.arange(nb_block):
exp.add_block(block_list[ad])
# Print exp. variable names in the log file
exp.data_variable_names = setting["llog_var_names"]
# # %%
# # ================ DEFINE AND PRELOAD SOME STIMULI ======================
# TTL cross
fixcross_ttl = stimuli.FixCross(size=(40, 40), line_width=3,
colour=(255, 255, 0))
fixcross_ttl.preload()
# # Message at the end of each session
blockend_message = stimuli.TextLine(setting["text_end_session"],
text_size=44,
text_colour=(255, 153, 51))
blockend_message.preload()
# # Final message before quitting the experiment
text_end = stimuli.TextBox(str(''.join((setting["text_end_exp_one"],
'\n\n',
setting["text_end_exp_two"]))).decode('utf-8'),
(1000, 1000), position=(0, -400),
text_size=44, text_colour=(255, 153, 51))
text_end.preload()
# # %%
# # ================================ RUN ==================================
# # =======================================================================
# # Starts running the experiment:
# # (1) Present a screen asking for the subject no. (exp.subject) and
# # wait for the RETURN key
# # (2) Create a data file (exp.data)
# # (3) Present the "Ready" screen
# # =======================================================================
control.start(exp, skip_ready_screen=True)
# # =======================================================================
# # Run the protocol
# # =======================================================================
stop = False
found_key = 0
key_totalexp = []
# While "h" key is not pressed, ...
while not stop:
# Loop over all runs
for b, block in enumerate(exp.blocks[start_block:]):
block_no = b + start_block
t_jit = 0
# Display fixation cross that sets the beginning of the experiment
fixcross_ttl.present()
# Wait for TTL
exp.keyboard.wait_char(setting["TTL"])
exp.screen.clear()
exp.screen.update()
# Creates the clock
t0 = misc.Clock()
# Wait INITIALWAIT seconds before the beginning of the trial
fixcross_isi.present()
exp.clock.wait(setting["INITIALWAIT"])
# Loop over all trials within a block
for t, trial in enumerate(block.trials):
# Getter for the time in milliseconds since clock init.
# Time for the beginning of the trial
t_start = t0.time
# Present stimulus
for s, stimulus in enumerate(trial.stimuli):
if len(trial.stimuli) > 1:
if s == 0:
stimulus.present(update=False)
elif s == len(trial.stimuli) - 1:
stimulus.present(clear=False)
else:
stimulus.present(clear=False, update=False)
else:
stimulus.present()
# Jittered duration during rest,
# i.e. between encoding and recognition
if len(trial.stimuli) == 1:
jit_rest = design.randomize.rand_int(10000, 14000)
found_key, _ = exp.keyboard.wait(keys=[misc.constants.K_h],
duration=jit_rest)
# If "h" key is pressed, returns to main menu
if found_key == misc.constants.K_h:
stop = True
break
diff_mean_rest = 1000 - jit_rest
t_jit = t_jit + diff_mean_rest
# Calculate total duration of the rest period
duration_rest = t0.time - t_start
# Log file registry for rest
exp.data.add([block_no, t,
trial.get_factor(setting["var_names"][0]),
trial.get_factor(setting["var_names"][2]),
t_start, duration_rest])
else:
# Duration of active trials
if len(trial.stimuli) == 4:
key, rt = exp.keyboard.wait_char([setting["YES"],
setting["NO"]],
duration=5000)
t_end = t0.time
t_diff = t_end - t_start
if t_diff < 5000:
exp.clock.wait(5000-t_diff)
# Calculate total duration of the active condition
duration_active = t0.time - t_start
# Log file registry for the active condition
exp.data.add([block_no, t,
trial.get_factor(setting["var_names"][0]),
trial.get_factor(setting["var_names"][1]),
trial.get_factor(setting["var_names"][2]),
trial.get_factor(setting["var_names"][3]),
trial.get_factor(setting["var_names"][4]),
t_start, duration_active, key, rt])
# Duration of instruction trial
else:
found_key, _ = exp.keyboard.wait(
keys=[misc.constants.K_h], duration=5000)
# If "h" key is pressed, returns to main menu
if found_key == misc.constants.K_h:
stop = True
break
# Calculate total duration of the instruction
duration_inst = t0.time - t_start
# Log file registry for the instruction trials
exp.data.add([block_no, t,
trial.get_factor(setting["var_names"][0]),
trial.get_factor(setting["var_names"][2]),
t_start, duration_inst])
# Jittered ISI fixation cross
fixcross_isi.present()
jit_isi = design.randomize.rand_int(300, 700)
found_key, _ = exp.keyboard.wait(keys=[misc.constants.K_h],
duration=jit_isi)
# If "h" key is pressed, returns to main menu
if found_key == misc.constants.K_h:
stop = True
break
diff_mean_isi = 500 - jit_isi
t_jit = t_jit + diff_mean_isi
if stop:
break
# Display fixation cross in the end of the session
fixcross_isi.present()
found_key, _ = exp.keyboard.wait(keys=[misc.constants.K_h],
duration=15000 + t_jit)
# If "h" key is pressed, returns to main menu
if found_key == misc.constants.K_h:
stop = True
break
# In the end of each session:
if block_no < (nb_block - 1):
fixcross_isi.present()
# Display message: "End of Session"
blockend_message.present()
found_key, _ = exp.keyboard.wait(keys=[misc.constants.K_RETURN,
misc.constants.K_h])
if found_key == misc.constants.K_h:
stop = True
break
# In the end of the experiment:
elif block_no == (nb_block - 1):
fixcross_isi.present()
# Display message: "End of the Experiment"
text_end.present()
found_key, _ = exp.keyboard.wait(keys=[misc.constants.K_RETURN,
misc.constants.K_h],
duration=5000)
# Leave while loop
stop = True
|
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Core module views
"""
from django.contrib.auth import authenticate, login, logout
from django.contrib.sessions.models import Session
from django.contrib.sites.models import RequestSite
# from django.contrib.csrf.middleware import CsrfMiddleware as csrf
from django.utils.encoding import smart_unicode
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.cache import cache_control
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404, HttpResponse, HttpResponseBadRequest
from django.core.urlresolvers import resolve, reverse
from django.shortcuts import get_object_or_404
from treeio.core.conf import settings
from treeio.core.decorators import treeio_login_required, handle_response_format
from treeio.core.forms import LoginForm, PasswordResetForm, InvitationForm, SqlSettingsForm
from treeio.core.models import Object, Module, ModuleSetting, Perspective, User, Attachment, Invitation, Tag, \
UpdateRecord
from treeio.core.rendering import render_to_response
from jinja2 import Markup
from os.path import join
import re
import json
import urllib2
import random
@handle_response_format
@treeio_login_required
def user_logout(request, response_format='html'):
"User logout"
logout(request)
return HttpResponseRedirect(reverse('user_login'))
@handle_response_format
def user_login(request, response_format='html'):
"User login"
if request.user.username:
return HttpResponseRedirect(reverse('user_denied'))
next = request.GET.get('next', '/')
form = LoginForm(request.POST)
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user and getattr(settings, 'HARDTREE_DISABLE_EVERGREEN_USERS', False) and 'evergreen_' in user.username[:10]:
user = None
if form.is_valid():
if user is not None:
try:
profile = user.profile
except:
profile = None
if not profile:
return render_to_response('core/user_login', {
'error_message': 'Username or password you entered is not valid', 'form': Markup(form)},
context_instance=RequestContext(request), response_format=response_format)
if profile.disabled:
return render_to_response('core/user_login', {
'error_message': 'Your account is disabled.',
'form': Markup(form)},
context_instance=RequestContext(request),
response_format=response_format)
if user.is_active and profile:
# Disable account with overdue payment
if getattr(settings, "HARDTREE_SUBSCRIPTION_BLOCKED", False):
return render_to_response('core/user_login', {
'error_message': 'We are sorry to inform you but your account has been deactivated. Please login to your <a href="https://www.tree.io/login/">control panel</a> to see details.',
'form': Markup(form)},
context_instance=RequestContext(request),
response_format=response_format)
login(request, user)
# Prevent same user from logging in at 2 different machines
if getattr(settings, "HARDTREE_MULTIPLE_LOGINS_DISABLED", False):
for ses in Session.objects.all():
if ses != request.session:
try:
data = ses.get_decoded()
if '_auth_user_id' in data and data['_auth_user_id'] == request.user.id:
ses.delete()
except Exception:
pass
if 'next' in request.POST:
return HttpResponseRedirect(request.POST['next'])
else:
return HttpResponseRedirect(next)
else:
return render_to_response('core/user_login_disabled',
context_instance=RequestContext(
request),
response_format=response_format)
else:
return render_to_response('core/user_login', {
'error_message': 'Username or password you entered is not valid', 'form': Markup(form)},
context_instance=RequestContext(request), response_format=response_format)
elif not form.is_valid() and user is None:
return render_to_response('core/user_login',
{'error_message': 'Username or password you entered is not valid', 'form': Markup(
form)},
context_instance=RequestContext(request), response_format=response_format)
else:
return render_to_response('core/user_login',
{'error_message': 'Please re-enter the text from the image',
'form': Markup(form)},
context_instance=RequestContext(request), response_format=response_format)
else:
return render_to_response('core/user_login', {'form': Markup(form)},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
def user_denied(request, message='', response_format='html'):
"User denied page"
response = render_to_response('core/user_denied',
{'message': message},
context_instance=RequestContext(request), response_format=response_format)
# response.status_code = 403
return response
@treeio_login_required
@handle_response_format
def user_perspective(request, response_format='html'):
"Change user perspective"
user = request.user.profile
if request.POST and 'core_perspective' in request.POST:
id = request.POST['core_perspective']
perspective = get_object_or_404(Perspective, pk=id)
if user.has_permission(perspective):
user.set_perspective(perspective)
return HttpResponseRedirect(reverse('home'))
@cache_control(private=True, must_revalidate=True, max_age=60)
def logo_image(request, gif=False, response_format='html'):
"Return current logo image"
staticpath = getattr(settings, 'STATIC_DOC_ROOT', './static')
logopath = staticpath + '/logo'
if gif:
logopath += '.gif'
mimetype = 'image/gif'
else:
logopath += '.png'
mimetype = 'image/png'
customlogo = ''
try:
conf = ModuleSetting.get_for_module('treeio.core', 'logopath')[0]
customlogo = getattr(
settings, 'MEDIA_ROOT', './static/media') + conf.value
except:
pass
logofile = ''
if customlogo:
try:
logofile = open(customlogo, 'rb')
except:
pass
if not logofile:
try:
logofile = open(logopath, 'rb')
except:
pass
return HttpResponse(logofile.read(), mimetype=mimetype)
def ajax_popup(request, popup_id='', url='/'):
"Handles pop up forms and requests, by extracting only the required content from response content"
view, args, kwargs = resolve(url)
if not request.user.username:
return HttpResponseRedirect('/accounts/login')
modules = Module.objects.all()
active = None
for module in modules:
try:
import_name = module.name + "." + \
settings.HARDTREE_MODULE_IDENTIFIER
hmodule = __import__(import_name, fromlist=[str(module.name)])
urls = hmodule.URL_PATTERNS
for regexp in urls:
if re.match(regexp, url):
active = module
except ImportError:
pass
except AttributeError:
pass
response = None
if active:
if not request.user.profile.has_permission(active):
response = user_denied(request, "You do not have access to the %s module" % unicode(active),
response_format='ajax')
if not response:
if view == ajax_popup:
raise Http404("OMG, I see myself!")
kwargs['request'] = request
kwargs['response_format'] = 'ajax'
response = view(*args, **kwargs)
# response = csrf().process_response(request, response)
module_inner = ""
regexp = r"<!-- module_content_inner -->(?P<module_inner>.*?)<!-- /module_content_inner -->"
blocks = re.finditer(regexp, response.content, re.DOTALL)
for block in blocks:
module_inner += block.group('module_inner').strip()
title = ""
regexp = r"<div class=\\\"title\\\">(?P<title>.*?)</div>"
blocks = re.finditer(regexp, response.content, re.DOTALL)
for block in blocks:
title += block.group('title').replace('\\n', '').strip()
if not title:
blocks = re.finditer(
r"<title>(?P<title>.*?)</title>", response.content, re.DOTALL)
for block in blocks:
title += block.group('title').replace('\\n', '').strip()
subtitle = ""
regexp = r"<div class=\\\"subtitle-block\\\">(?P<subtitle>.*?)</div>"
blocks = re.finditer(regexp, response.content, re.DOTALL)
for block in blocks:
subtitle += block.group('subtitle').replace('\\n', '').strip()
context = {'content': module_inner, 'title': title, 'subtitle': subtitle, 'popup_id': popup_id, 'url': request.path}
if settings.HARDTREE_RESPONSE_FORMATS['json'] in response.get('Content-Type', 'text/html'):
new_response = render_to_response('core/ajax_popup', context,
context_instance=RequestContext(request), response_format='json')
else:
new_response = HttpResponse(json.dumps({'popup': context}))
new_response.mimetype = settings.HARDTREE_RESPONSE_FORMATS['json']
try:
jsonresponse = json.loads(response.content)
if 'redirect' in jsonresponse:
new_response.status_code = 302
except Exception:
new_response.status_code = response.status_code
return new_response
def mobile_view(request, url='/'):
"Returns the same page in mobile view"
if not url:
url = '/'
view, args, kwargs = resolve(url)
if view == mobile_view:
raise Http404("OMG, I see myself!")
kwargs['request'] = request
kwargs['response_format'] = 'html'
response = view(*args, **kwargs)
# response = csrf().process_response(request, response)
if response.status_code == 302 and not response['Location'][:2] == '/m':
response['Location'] = '/m' + response['Location']
return response
def iframe_close(request, response_format='html'):
"For third-party resources, when returned back to Hardtree, close iframe"
return render_to_response('core/iframe_close', {},
context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
def database_setup(request, response_format='html'):
if not User.objects.all().count():
if request.POST:
sql_form = SqlSettingsForm(data=request.POST)
if sql_form.is_valid():
sql_form.create_database()
if sql_form.is_valid():
return HttpResponseRedirect('/')
else:
sql_form = SqlSettingsForm()
return render_to_response('core/database_setup', {'sql_form': sql_form},
context_instance=RequestContext(request), response_format=response_format)
return HttpResponseRedirect('/')
@treeio_login_required
def help_page(request, url='/', response_format='html'):
"Returns a Help page from Evergreen"
source = getattr(
settings, 'HARDTREE_HELP_SOURCE', 'http://127.0.0.1:7000/help')
if not url:
url = '/'
body = ''
try:
body = urllib2.urlopen(
source + url + '?domain=' + RequestSite(request).domain).read()
except:
pass
regexp = r"<!-- module_content_inner -->(?P<module_inner>.*?)<!-- /module_content_inner -->"
blocks = re.finditer(regexp, body, re.DOTALL)
for block in blocks:
body = smart_unicode(block.group('module_inner').strip())
return render_to_response('core/help_page', {'body': body},
context_instance=RequestContext(request),
response_format=response_format)
#
# AJAX lookups
#
@treeio_login_required
def ajax_object_lookup(request, response_format='html'):
"Returns a list of matching objects"
objects = []
if request.GET and 'term' in request.GET:
objects = Object.filter_permitted(request.user.profile,
Object.objects.filter(
object_name__icontains=request.GET['term']),
mode='x')[:10]
return render_to_response('core/ajax_object_lookup',
{'objects': objects},
context_instance=RequestContext(request),
response_format=response_format)
@treeio_login_required
def ajax_tag_lookup(request, response_format='html'):
"Returns a list of matching tags"
tags = []
if request.GET and 'term' in request.GET:
tags = Tag.objects.filter(name__icontains=request.GET['term'])
return render_to_response('core/ajax_tag_lookup',
{'tags': tags},
context_instance=RequestContext(request),
response_format=response_format)
#
# Widgets
#
@treeio_login_required
def widget_welcome(request, response_format='html'):
"Quick start widget, which users see when they first log in"
trial = False
if getattr(settings, 'HARDTREE_SUBSCRIPTION_USER_LIMIT') == 3:
trial = True
customization = getattr(
settings, 'HARDTREE_SUBSCRIPTION_CUSTOMIZATION', True)
return render_to_response('core/widgets/welcome', {'trial': trial, 'customization': customization},
context_instance=RequestContext(request), response_format=response_format)
#
# Passwords
#
@csrf_protect
def password_reset(request, response_format='html'):
"Password_reset sends the email with the new password"
if request.POST:
form = PasswordResetForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('password_reset_done'))
else:
form = PasswordResetForm()
return render_to_response('core/password_reset_form',
{'form': form},
context_instance=RequestContext(request),
response_format=response_format)
def password_reset_done(request, response_format='html'):
"Shows success message"
return render_to_response('core/password_reset_done',
context_instance=RequestContext(request),
response_format=response_format)
def invitation_retrieve(request, response_format='html'):
"Retrieve invitation and create account"
if request.user.username:
return HttpResponseRedirect('/')
email = request.REQUEST.get('email', None)
key = request.REQUEST.get('key', None)
if email and key:
        try:
            invitation = Invitation.objects.get(email=email, key=key)
        except Invitation.DoesNotExist:
            raise Http404
else:
raise Http404
if request.POST:
form = InvitationForm(invitation, request.POST)
if form.is_valid():
profile = form.save()
username = profile.user.username
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user:
invitation.delete()
login(request, user)
return HttpResponseRedirect('/')
else:
form = InvitationForm(invitation)
return render_to_response('core/invitation_retrieve',
{'invitation': invitation,
'form': form},
context_instance=RequestContext(request),
response_format=response_format)
def save_upload(uploaded, filename, raw_data):
'''
raw_data: if True, uploaded is an HttpRequest object with the file being
the raw post data
if False, uploaded has been submitted via the basic form
submission and is a regular Django UploadedFile in request.FILES
'''
try:
from io import FileIO, BufferedWriter
with BufferedWriter(FileIO(filename, "wb")) as dest:
# if the "advanced" upload, read directly from the HTTP request
# with the Django 1.3 functionality
if raw_data:
if isinstance(uploaded, basestring):
dest.write(uploaded)
else:
foo = uploaded.read(1024)
while foo:
dest.write(foo)
foo = uploaded.read(1024)
# if not raw, it was a form upload so read in the normal Django
# chunks fashion
else:
for c in uploaded.chunks():
dest.write(c)
# got through saving the upload, report success
return True
except IOError:
# could not open the file most likely
pass
return False
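# Illustrative sketch only (not part of the original views): how save_upload() is
# typically driven for the two upload flavours handled by ajax_upload() below.
# The destination filename 'example' is hypothetical.
def _example_save_upload_usage(request):
    destination = join(getattr(settings, 'MEDIA_ROOT'), 'attachments', 'example')
    if request.is_ajax():
        # "advanced" AJAX upload: the raw POST body lives on the request itself
        return save_upload(request, destination, True)
    # "basic" iframe/form upload: a regular Django UploadedFile in request.FILES
    upload = list(request.FILES.values())[0]
    return save_upload(upload, destination, False)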
@treeio_login_required
def ajax_upload(request, object_id=None, record=None):
try:
object = None
if request.method == "POST":
if request.is_ajax():
# the file is stored raw in the request
upload = request
is_raw = True
# AJAX Upload will pass the filename in the querystring if it
# is the "advanced" ajax upload
try:
filename = request.GET['qqfile']
content_type = "application/octet-stream"
except KeyError:
return HttpResponseBadRequest("AJAX request not valid")
# not an ajax upload, so it was the "basic" iframe version with
# submission via form
else:
is_raw = False
if len(request.FILES) == 1:
# FILES is a dictionary in Django but Ajax Upload gives the uploaded file an
# ID based on a random number, so it cannot be guessed here in the code.
# Rather than editing Ajax Upload to pass the ID in the querystring,
                    # observe that each upload is a separate request,
# so FILES should only have one entry.
# Thus, we can just grab the first (and only) value in the
# dict.
upload = request.FILES.values()[0]
content_type = upload.content_type
else:
raise Http404("Bad Upload")
filename = upload.name
random.seed()
filehash = str(random.getrandbits(128))
savefile = join(
getattr(settings, 'MEDIA_ROOT'), 'attachments', filehash)
# save the file
success = save_upload(upload, savefile, is_raw)
attachment = Attachment(filename=filename,
mimetype=content_type,
uploaded_by=request.user.profile,
attached_file=filehash)
if record:
attachment.attached_record = record
about = record.about.all()
if about.count():
attachment.attached_object = about[0]
object = attachment.attached_object
else:
object = Object.objects.get(id=object_id)
attachment.attached_object = object
attachment.save()
if object:
object.set_last_updated()
# TODO: smart markup and return as string, and object id, different
# classnames,id or attribute for update records and objects
if success:
ret_json = {'success': success,
'object_id': object.id if object else None,
'update_id': record.id if record else None}
else:
ret_json = {'success': False,
'object_id': None,
'update_id': None}
return HttpResponse(json.dumps(ret_json))
    except Exception:
        # something went wrong while handling the upload; report failure to the client
        return HttpResponse(json.dumps({'success': False,
                                        'object_id': None,
                                        'update_id': None}))
@treeio_login_required
def ajax_upload_record(request, record_id=None):
record = UpdateRecord.objects.get(id=record_id)
return ajax_upload(request, None, record)
@treeio_login_required
def attachment_download(request, attachment_id):
try:
attachment = Attachment.objects.get(pk=attachment_id)
except Attachment.DoesNotExist:
raise Http404()
filepath = join(
getattr(settings, 'MEDIA_ROOT'), 'attachments', attachment.attached_file.name)
try:
        with open(filepath, 'rb') as attached_file:
            data = attached_file.read()
except IOError:
raise Http404()
response = HttpResponse(data, content_type=attachment.mimetype)
    response['Content-Disposition'] = 'filename="%s"' % smart_unicode(attachment.filename)
return response
|
|
# -*- coding: utf-8 -*-
import furl
from rest_framework import status as http_status
import json
from future.moves.urllib.parse import quote
from lxml import etree
import requests
from framework.auth import authenticate, external_first_login_authenticate
from framework.auth.core import get_user, generate_verification_key
from framework.flask import redirect
from framework.exceptions import HTTPError
from website import settings
class CasError(HTTPError):
"""General CAS-related error."""
pass
class CasHTTPError(CasError):
"""Error raised when an unexpected error is returned from the CAS server."""
def __init__(self, code, message, headers, content):
super(CasHTTPError, self).__init__(code, message)
self.headers = headers
self.content = content
def __repr__(self):
return ('CasHTTPError({self.message!r}, {self.code}, '
'headers={self.headers}, content={self.content!r})').format(self=self)
__str__ = __repr__
class CasTokenError(CasError):
"""Raised if an invalid token is passed by the client."""
def __init__(self, message):
super(CasTokenError, self).__init__(http_status.HTTP_400_BAD_REQUEST, message)
class CasResponse(object):
"""A wrapper for an HTTP response returned from CAS."""
def __init__(self, authenticated=False, status=None, user=None, attributes=None):
self.authenticated = authenticated
self.status = status
self.user = user
self.attributes = attributes or {}
class CasClient(object):
"""HTTP client for the CAS server."""
def __init__(self, base_url):
self.BASE_URL = base_url
def get_login_url(self, service_url, campaign=None, username=None, verification_key=None):
"""
Get CAS login url with `service_url` as redirect location. There are three options:
1. no additional parameters provided -> go to CAS login page
2. `campaign=institution` -> go to CAS institution login page
        3. `(username, verification_key)` -> CAS will verify this request automatically in the background
:param service_url: redirect url after successful login
:param campaign: the campaign name, currently 'institution' only
:param username: the username
:param verification_key: the verification key
:return: dedicated CAS login url
"""
url = furl.furl(self.BASE_URL)
url.path.segments.append('login')
url.args['service'] = service_url
if campaign:
url.args['campaign'] = campaign
elif username and verification_key:
url.args['username'] = username
url.args['verification_key'] = verification_key
return url.url
def get_logout_url(self, service_url):
url = furl.furl(self.BASE_URL)
url.path.segments.append('logout')
url.args['service'] = service_url
return url.url
def get_profile_url(self):
url = furl.furl(self.BASE_URL)
url.path.segments.extend(('oauth2', 'profile',))
return url.url
def get_auth_token_revocation_url(self):
url = furl.furl(self.BASE_URL)
url.path.segments.extend(('oauth2', 'revoke'))
return url.url
def service_validate(self, ticket, service_url):
"""
Send request to CAS to validate ticket.
:param str ticket: CAS service ticket
:param str service_url: Service URL from which the authentication request originates
:rtype: CasResponse
:raises: CasError if an unexpected response is returned
"""
url = furl.furl(self.BASE_URL)
url.path.segments.extend(('p3', 'serviceValidate',))
url.args['ticket'] = ticket
url.args['service'] = service_url
resp = requests.get(url.url)
if resp.status_code == 200:
return self._parse_service_validation(resp.content)
else:
self._handle_error(resp)
def profile(self, access_token):
"""
Send request to get profile information, given an access token.
:param str access_token: CAS access_token.
:rtype: CasResponse
:raises: CasError if an unexpected response is returned.
"""
url = self.get_profile_url()
headers = {
'Authorization': 'Bearer {}'.format(access_token),
}
resp = requests.get(url, headers=headers)
if resp.status_code == 200:
return self._parse_profile(resp.content, access_token)
else:
self._handle_error(resp)
def _handle_error(self, response, message='Unexpected response from CAS server'):
"""Handle an error response from CAS."""
raise CasHTTPError(
code=response.status_code,
message=message,
headers=response.headers,
content=response.content,
)
def _parse_service_validation(self, xml):
resp = CasResponse()
doc = etree.fromstring(xml)
auth_doc = doc.xpath('/cas:serviceResponse/*[1]', namespaces=doc.nsmap)[0]
resp.status = str(auth_doc.xpath('local-name()'))
if (resp.status == 'authenticationSuccess'):
resp.authenticated = True
resp.user = str(auth_doc.xpath('string(./cas:user)', namespaces=doc.nsmap))
attributes = auth_doc.xpath('./cas:attributes/*', namespaces=doc.nsmap)
for attribute in attributes:
resp.attributes[str(attribute.xpath('local-name()'))] = str(attribute.text)
scopes = resp.attributes.get('accessTokenScope')
resp.attributes['accessTokenScope'] = set(scopes.split(' ') if scopes else [])
else:
resp.authenticated = False
return resp
def _parse_profile(self, raw, access_token):
data = json.loads(raw)
resp = CasResponse(authenticated=True, user=data['id'])
if data.get('attributes'):
resp.attributes.update(data['attributes'])
resp.attributes['accessToken'] = access_token
resp.attributes['accessTokenScope'] = set(data.get('scope', []))
return resp
def revoke_application_tokens(self, client_id, client_secret):
"""Revoke all tokens associated with a given CAS client_id"""
return self.revoke_tokens(payload={'client_id': client_id, 'client_secret': client_secret})
def revoke_tokens(self, payload):
"""Revoke a tokens based on payload"""
url = self.get_auth_token_revocation_url()
resp = requests.post(url, data=payload)
if resp.status_code == 204:
return True
else:
self._handle_error(resp)
def parse_auth_header(header):
"""
Given an Authorization header string, e.g. 'Bearer abc123xyz',
return a token or raise an error if the header is invalid.
    :param header: the value of the Authorization header
    :return: the bearer token string
    """
    parts = header.split() if header else []
    if not parts or parts[0].lower() != 'bearer':
raise CasTokenError('Unsupported authorization type')
elif len(parts) == 1:
raise CasTokenError('Missing token')
elif len(parts) > 2:
raise CasTokenError('Token contains spaces')
return parts[1] # the token
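# Illustrative sketch only: the expected behaviour of parse_auth_header() for a
# well-formed and a malformed Authorization header (token values are made up).
def _example_parse_auth_header():
    assert parse_auth_header('Bearer abc123xyz') == 'abc123xyz'
    try:
        parse_auth_header('Basic abc123xyz')
    except CasTokenError:
        pass  # 'Unsupported authorization type'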
def get_client():
return CasClient(settings.CAS_SERVER_URL)
def get_login_url(*args, **kwargs):
"""
Convenience function for getting a login URL for a service.
:param args: Same args that `CasClient.get_login_url` receives
:param kwargs: Same kwargs that `CasClient.get_login_url` receives
"""
return get_client().get_login_url(*args, **kwargs)
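# Illustrative sketch only (hypothetical URLs and credentials): the three ways a
# CAS login URL is built, mirroring the options documented on CasClient.get_login_url().
def _example_get_login_url():
    client = CasClient('https://accounts.example.org/')
    # 1. no extra parameters -> plain CAS login page
    plain = client.get_login_url('https://osf.example.org/dashboard/')
    # 2. campaign='institution' -> CAS institution login page
    institution = client.get_login_url('https://osf.example.org/dashboard/',
                                       campaign='institution')
    # 3. (username, verification_key) -> CAS verifies the request automatically
    automatic = client.get_login_url('https://osf.example.org/dashboard/',
                                     username='user@example.org',
                                     verification_key='abc123')
    return plain, institution, automatic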
def get_institution_target(redirect_url):
return '/login?service={}&auto=true'.format(quote(redirect_url, safe='~()*!.\''))
def get_logout_url(*args, **kwargs):
"""
Convenience function for getting a logout URL for a service.
:param args: Same args that `CasClient.get_logout_url` receives
:param kwargs: Same kwargs that `CasClient.get_logout_url` receives
"""
return get_client().get_logout_url(*args, **kwargs)
def get_profile_url():
"""Convenience function for getting a profile URL for a user."""
return get_client().get_profile_url()
def make_response_from_ticket(ticket, service_url):
"""
Given a CAS ticket and service URL, attempt to validate the user and return a proper redirect response.
:param str ticket: CAS service ticket
:param str service_url: Service URL from which the authentication request originates
:return: redirect response
"""
service_furl = furl.furl(service_url)
    # `service_url` is guaranteed to have had its `ticket` parameter removed; it was pulled off in
    # `framework.sessions.before_request()`.
if 'ticket' in service_furl.args:
service_furl.args.pop('ticket')
client = get_client()
cas_resp = client.service_validate(ticket, service_furl.url)
if cas_resp.authenticated:
user, external_credential, action = get_user_from_cas_resp(cas_resp)
# user found and authenticated
if user and action == 'authenticate':
# if we successfully authenticate and a verification key is present, invalidate it
if user.verification_key:
user.verification_key = None
user.save()
            # if the user was authenticated by an external IdP, ask CAS to authenticate the user a second time;
            # this extra step guarantees that 2FA is enforced
            # the current CAS session created by the external login must be cleared before re-authentication
if external_credential:
user.verification_key = generate_verification_key()
user.save()
return redirect(get_logout_url(get_login_url(
service_url,
username=user.username,
verification_key=user.verification_key
)))
# if user is authenticated by CAS
# TODO [CAS-27]: Remove Access Token From Service Validation
return authenticate(
user,
cas_resp.attributes.get('accessToken', ''),
redirect(service_furl.url)
)
# first time login from external identity provider
if not user and external_credential and action == 'external_first_login':
from website.util import web_url_for
# orcid attributes can be marked private and not shared, default to orcid otherwise
fullname = u'{} {}'.format(cas_resp.attributes.get('given-names', ''), cas_resp.attributes.get('family-name', '')).strip()
# TODO [CAS-27]: Remove Access Token From Service Validation
user = {
'external_id_provider': external_credential['provider'],
'external_id': external_credential['id'],
'fullname': fullname,
'access_token': cas_resp.attributes.get('accessToken', ''),
'service_url': service_furl.url,
}
return external_first_login_authenticate(
user,
redirect(web_url_for('external_login_email_get'))
)
# Unauthorized: ticket could not be validated, or user does not exist.
return redirect(service_furl.url)
def get_user_from_cas_resp(cas_resp):
"""
Given a CAS service validation response, attempt to retrieve user information and next action.
The `user` in `cas_resp` is the unique GUID of the user. Please do not use the primary key `id`
or the email `username`. This holds except for the first step of ORCiD login.
:param cas_resp: the cas service validation response
:return: the user, the external_credential, and the next action
"""
from osf.models import OSFUser
if cas_resp.user:
user = OSFUser.load(cas_resp.user)
# cas returns a valid OSF user id
if user:
return user, None, 'authenticate'
# cas does not return a valid OSF user id
else:
external_credential = validate_external_credential(cas_resp.user)
# invalid cas response
if not external_credential:
return None, None, None
# cas returns a valid external credential
user = get_user(external_id_provider=external_credential['provider'],
external_id=external_credential['id'])
# existing user found
if user:
return user, external_credential, 'authenticate'
# user first time login through external identity provider
else:
return None, external_credential, 'external_first_login'
def validate_external_credential(external_credential):
"""
Validate the external credential, a string which is composed of the profile name and the technical identifier
of the external provider, separated by `#`. Return the provider and id on success.
:param external_credential: the external credential string
:return: provider and id
"""
# wrong format
if not external_credential or '#' not in external_credential:
return False
profile_name, technical_id = external_credential.split('#', 1)
# invalid external identity provider
if profile_name not in settings.EXTERNAL_IDENTITY_PROFILE:
return False
# invalid external id
if len(technical_id) <= 0:
return False
provider = settings.EXTERNAL_IDENTITY_PROFILE[profile_name]
return {
'provider': provider,
'id': technical_id,
}
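# Illustrative sketch only (the identifier is made up): expected return values of
# validate_external_credential(), assuming a profile name such as 'OrcidProfile'
# is configured in settings.EXTERNAL_IDENTITY_PROFILE.
def _example_validate_external_credential():
    assert validate_external_credential('no-separator') is False
    parsed = validate_external_credential('OrcidProfile#0000-0002-1825-0097')
    # on success: {'provider': <configured provider>, 'id': '0000-0002-1825-0097'}
    return parsed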
|
|
from __future__ import (absolute_import, division, print_function)
import numpy as np
import re
import sys
import copy
from periodictable import formula
from qtpy.QtWidgets import QMainWindow, QApplication
from addie.utilities import load_ui
from qtpy import QtGui
from addie.processing.mantid.master_table.table_row_handler import TableRowHandler
from addie.processing.mantid.master_table.periodic_table.isotopes_handler import IsotopesHandler
from addie.processing.mantid.master_table.tree_definition import INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA
def get_periodictable_formatted_element_and_number_of_atoms(element):
    '''Convert an element from the Mantid format, e.g. (Si28)2, to the format accepted by the
    periodictable library, e.g. Si[28]2 (used, for example, to calculate the molecular mass),
    returning [formatted_element, number_of_atoms].'''
# if we have a single stable element
regular_expression_1 = r'^(?P<stable_element>[A-Z]{1}[a-z]{0,1}$)'
m1 = re.search(regular_expression_1, element)
if m1 is not None:
return [m1.group('stable_element'), 1.]
    # stable element with a stoichiometric coefficient
regular_expression_2 = r'^(?P<stable_element>[A-Z]{1}[a-z]{0,1})(?P<stochiometric_coefficient>\d*\.{0,1}\d*)$'
m2 = re.search(regular_expression_2, element)
if m2 is not None:
return ["{}{}".format(m2.group('stable_element'), m2.group('stochiometric_coefficient')),
np.float(m2.group('stochiometric_coefficient'))]
    # isotope with or without a stoichiometric coefficient
regular_expression_3 = r'\((?P<isotope_element>[A-Z]{1}[a-z]{0,1})(?P<isotope_number>\d+)\)' \
r'(?P<stochiometric_coefficient>\d*\.{0,1}\d*)'
m3 = re.search(regular_expression_3, element)
if m3 is not None:
if m3.group('stochiometric_coefficient') == "":
number_of_atoms = 1.
else:
            number_of_atoms = float(m3.group('stochiometric_coefficient'))
return ["{}[{}]{}".format(m3.group('isotope_element'), m3.group('isotope_number'),
m3.group('stochiometric_coefficient')), number_of_atoms]
raise ValueError
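# Illustrative sketch only: a few conversions performed by
# get_periodictable_formatted_element_and_number_of_atoms().
def _example_element_formatting():
    assert get_periodictable_formatted_element_and_number_of_atoms('Si') == ['Si', 1.]
    assert get_periodictable_formatted_element_and_number_of_atoms('Si2') == ['Si2', 2.]
    assert get_periodictable_formatted_element_and_number_of_atoms('(Si28)2') == ['Si[28]2', 2.]
    assert get_periodictable_formatted_element_and_number_of_atoms('(O18)') == ['O[18]', 1.]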
def retrieving_molecular_mass_and_number_of_atoms_worked(chemical_formula):
    '''Parse the formula, converting it from the Mantid format to the periodictable library format,
    in order to calculate the molecular mass.
    return: (molecular mass, total number of atoms) if the string was correctly formatted,
            (None, None) if something went wrong
    '''
list_element = chemical_formula.split(" ")
periodictable_list_element_format = []
total_number_of_atoms = 0.
try:
for _element in list_element:
[formated_element, number_of_atoms] = get_periodictable_formatted_element_and_number_of_atoms(
_element)
periodictable_list_element_format.append(formated_element)
total_number_of_atoms += number_of_atoms
periodictable_format = " ".join(periodictable_list_element_format)
periodictable_formula = formula(periodictable_format)
return periodictable_formula.mass, total_number_of_atoms
    except Exception:
        # the formula could not be parsed or converted to the periodictable format
        return None, None
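# Illustrative sketch only: looking up the molecular mass of a Mantid-style formula.
# The exact mass comes from the periodictable library, so it is not asserted here.
def _example_molecular_mass():
    mass, n_atoms = retrieving_molecular_mass_and_number_of_atoms_worked('Si O2')
    # n_atoms == 3.0 (one Si plus two O); mass is the molar mass of SiO2 in g/mol
    return mass, n_atoms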
class MaterialHandler:
def __init__(self, parent=None, database_window=None, key=None, data_type='sample'):
if parent.material_ui is None:
o_material = PeriodicTable(parent=parent,
database_window=database_window,
key=key,
data_type=data_type)
o_material.show()
parent.material_ui = o_material
if parent.material_ui_position:
parent.material_ui.move(parent.material_ui_position)
else:
parent.material_ui.setFocus()
parent.material_ui.activateWindow()
class PeriodicTable(QMainWindow):
isotope_ui = None
list_ui_color = {'list_ui': None,
'color': None}
list_color = {0: copy.deepcopy(list_ui_color),
1: copy.deepcopy(list_ui_color),
2: copy.deepcopy(list_ui_color),
3: copy.deepcopy(list_ui_color),
4: copy.deepcopy(list_ui_color),
5: copy.deepcopy(list_ui_color),
6: copy.deepcopy(list_ui_color),
7: copy.deepcopy(list_ui_color),
}
column = 0
def __init__(self, parent=None, database_window=None, key=None, data_type='sample'):
self.parent = parent
self.database_window = database_window
self.key = key
self.data_type = data_type
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('periodic_table.ui', baseinstance=self)
self.setWindowTitle("Define Chemical Formula")
self.init_ui_color_dictionary()
self.init_widgets()
self.set_column_index()
def set_column_index(self):
if self.data_type == 'sample':
self.column = INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA[0]
else:
self.column = INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA[1]
def init_ui_color_dictionary(self):
# color of element buttons
# purple
self.list_color[0]['list_ui'] = [self.ui.h,
self.ui.c,
self.ui.n,
self.ui.o,
self.ui.p,
self.ui.s,
self.ui.se,
]
self.list_color[0]['color'] = "#938ac0"
# cyan
self.list_color[1]['list_ui'] = [self.ui.li,
self.ui.na,
self.ui.k,
self.ui.rb,
self.ui.cs,
self.ui.fr,
]
self.list_color[1]['color'] = "#99d5c2"
# light green
self.list_color[2]['list_ui'] = [self.ui.be,
self.ui.mg,
self.ui.ca,
self.ui.sr,
self.ui.ba,
self.ui.ra,
]
self.list_color[2]['color'] = "#c6e8c1"
# light yellow
self.list_color[3]['list_ui'] = [self.ui.b,
self.ui.si,
self.ui.ge,
self.ui.arsenic,
self.ui.sb,
self.ui.te,
self.ui.po,
]
self.list_color[3]['color'] = "#eef8b9"
# dark yellow
self.list_color[4]['list_ui'] = [self.ui.f,
self.ui.cl,
self.ui.br,
self.ui.i,
self.ui.at,
self.ui.ts,
]
self.list_color[4]['color'] = "#fee9b0"
# blue
self.list_color[5]['list_ui'] = [self.ui.he,
self.ui.ne,
self.ui.ar,
self.ui.kr,
self.ui.xe,
self.ui.rn,
self.ui.og,
]
self.list_color[5]['color'] = "#79afd1"
# light orange
self.list_color[6]['list_ui'] = [self.ui.al,
self.ui.ga,
self.ui.indium,
self.ui.sn,
self.ui.tl,
self.ui.pb,
self.ui.bi,
self.ui.nh,
self.ui.fl,
self.ui.mc,
self.ui.lv,
]
self.list_color[6]['color'] = "#fec796"
# dark orange
self.list_color[7]['list_ui'] = [self.ui.sc,
self.ui.ti,
self.ui.v,
self.ui.cr,
self.ui.mn,
self.ui.fe,
self.ui.co,
self.ui.ni,
self.ui.cu,
self.ui.zn,
self.ui.y,
self.ui.zr,
self.ui.nb,
self.ui.mo,
self.ui.tc,
self.ui.ru,
self.ui.rh,
self.ui.pd,
self.ui.ag,
self.ui.cd,
self.ui.lu,
self.ui.hf,
self.ui.ta,
self.ui.w,
self.ui.re,
self.ui.os,
self.ui.ir,
self.ui.pt,
self.ui.au,
self.ui.hg,
self.ui.lr,
self.ui.rf,
self.ui.db,
self.ui.sg,
self.ui.bh,
self.ui.hs,
self.ui.mt,
self.ui.ds,
self.ui.rg,
self.ui.cn,
]
self.list_color[7]['color'] = "#f79d83"
def init_widgets(self):
self.ui.save_button.setEnabled(False)
        # init content of the chemical formula field
if self.data_type == 'database':
# retrieve value from import_from_database label
text = str(
self.database_window.list_ui[self.key]['value_label'].text())
else:
# retrieve value from sample or normalization columns in master table
text = str(
self.parent.master_table_list_ui[self.key][self.data_type]['material']['text'].text())
if text == 'N/A':
text = ""
self.ui.chemical_formula.setText(text)
# set color of buttons
for _key in self.list_color.keys():
_list_ui = self.list_color[_key]['list_ui']
_color = self.list_color[_key]['color']
for _ui in _list_ui:
_ui.setStyleSheet("background-color:{}".format(_color))
# clear button icon
self.ui.clear_button.setIcon(
QtGui.QIcon(":/MPL Toolbar/clear_icon.png"))
def reset_text_field(self):
self.ui.chemical_formula.setText("")
def chemical_formula_changed(self, new_formula):
self.check_status_save_button()
def add_new_entry(self, isotope='', number=1, is_natural_element=False):
if isotope == '':
return
previous_chemical_formula = str(self.ui.chemical_formula.text())
if is_natural_element:
if number == 1:
number = ''
new_isotope_string = "{}{}".format(isotope, number)
elif number > 1:
new_isotope_string = "({}){}".format(isotope, number)
else:
new_isotope_string = "({})".format(isotope)
if previous_chemical_formula != '':
new_chemical_formula = previous_chemical_formula + ' ' + new_isotope_string
else:
new_chemical_formula = new_isotope_string
self.ui.chemical_formula.setText(new_chemical_formula)
self.ui.chemical_formula.setFocus()
        # make chemical formula editable (bug in PyQt that sometimes turns editing off)
def click_button(self, element):
IsotopesHandler(parent=self, element=element.title())
def h_button(self):
self.click_button('h')
def li_button(self):
self.click_button('li')
def he_button(self):
self.click_button('he')
def be_button(self):
self.click_button('be')
def b_button(self):
self.click_button('b')
def c_button(self):
self.click_button('c')
def n_button(self):
self.click_button('n')
def o_button(self):
self.click_button('o')
def f_button(self):
self.click_button('f')
def ne_button(self):
self.click_button('ne')
def na_button(self):
self.click_button('na')
def mg_button(self):
self.click_button('mg')
def al_button(self):
self.click_button('al')
def si_button(self):
self.click_button('si')
def p_button(self):
self.click_button('p')
def s_button(self):
self.click_button('s')
def cl_button(self):
self.click_button('cl')
def ar_button(self):
self.click_button('ar')
def k_button(self):
self.click_button('k')
def ca_button(self):
self.click_button('ca')
def sc_button(self):
self.click_button('sc')
def ti_button(self):
self.click_button('ti')
def v_button(self):
self.click_button('v')
def cr_button(self):
self.click_button('cr')
def mn_button(self):
self.click_button('mn')
def fe_button(self):
self.click_button('fe')
def co_button(self):
self.click_button('co')
def ni_button(self):
self.click_button('ni')
def cu_button(self):
self.click_button('cu')
def zn_button(self):
self.click_button('zn')
def ga_button(self):
self.click_button('ga')
def ge_button(self):
self.click_button('ge')
def as_button(self):
self.click_button('as')
def se_button(self):
self.click_button('se')
def br_button(self):
self.click_button('br')
def kr_button(self):
self.click_button('kr')
def rb_button(self):
self.click_button('rb')
def sr_button(self):
self.click_button('sr')
def y_button(self):
self.click_button('y')
def zr_button(self):
self.click_button('zr')
def nb_button(self):
self.click_button('nb')
def mo_button(self):
self.click_button('mo')
def tc_button(self):
self.click_button('tc')
def ru_button(self):
self.click_button('ru')
def rh_button(self):
self.click_button('rh')
def pd_button(self):
self.click_button('pd')
def ag_button(self):
self.click_button('ag')
def cd_button(self):
self.click_button('cd')
def in_button(self):
self.click_button('in')
def sn_button(self):
self.click_button('sn')
def sb_button(self):
self.click_button('sb')
def te_button(self):
self.click_button('te')
def i_button(self):
self.click_button('i')
def xe_button(self):
self.click_button('xe')
def cs_button(self):
self.click_button('cs')
def ba_button(self):
self.click_button('ba')
def lu_button(self):
self.click_button('lu')
def hf_button(self):
self.click_button('hf')
def ta_button(self):
self.click_button('ta')
def w_button(self):
self.click_button('w')
def re_button(self):
self.click_button('re')
def os_button(self):
self.click_button('os')
def ir_button(self):
self.click_button('ir')
def pt_button(self):
self.click_button('pt')
def au_button(self):
self.click_button('au')
def hg_button(self):
self.click_button('hg')
def tl_button(self):
self.click_button('tl')
def pb_button(self):
self.click_button('pb')
def bi_button(self):
self.click_button('bi')
def po_button(self):
self.click_button('po')
def at_button(self):
self.click_button('at')
def rn_button(self):
self.click_button('rn')
def fr_button(self):
self.click_button('fr')
def ra_button(self):
self.click_button('ra')
def lr_button(self):
self.click_button('lr')
def rf_button(self):
self.click_button('rf')
def db_button(self):
self.click_button('db')
def sg_button(self):
self.click_button('sg')
def bh_button(self):
self.click_button('bh')
def hs_button(self):
self.click_button('hs')
def mt_button(self):
self.click_button('mt')
def ds_button(self):
self.click_button('ds')
def rg_button(self):
self.click_button('rg')
def cn_button(self):
self.click_button('cn')
def nh_button(self):
self.click_button('nh')
def fl_button(self):
self.click_button('fl')
def mc_button(self):
self.click_button('mc')
def lv_button(self):
self.click_button('lv')
def ts_button(self):
self.click_button('ts')
def og_button(self):
self.click_button('og')
def ok(self):
chemical_formula = str(self.ui.chemical_formula.text())
molecular_mass, total_number_of_atoms = retrieving_molecular_mass_and_number_of_atoms_worked(chemical_formula)
if molecular_mass and total_number_of_atoms:
self.parent.material_ui = None
if self.data_type == 'database':
ui = self.database_window.list_ui[self.key]['value_label']
ui.setText(chemical_formula)
else: # 'sample' or 'normalization'
text_ui = self.parent.master_table_list_ui[self.key][self.data_type]['material']['text']
text_ui.setText(chemical_formula)
o_table = TableRowHandler(main_window=self.parent)
o_table.transfer_widget_states(
from_key=self.key, data_type=self.data_type)
self.parent.master_table_list_ui[self.key][self.data_type]['mass_density_infos']['molecular_mass'] \
= molecular_mass
self.parent.master_table_list_ui[self.key][self.data_type]['mass_density_infos']['total_number_of_atoms'] \
= total_number_of_atoms
self.parent.check_master_table_column_highlighting(
column=self.column)
self.close()
else:
self.ui.statusbar.setStyleSheet("color: red")
self.ui.statusbar.showMessage("Unable to calculate Molecular Mass! CHECK YOUR FORMULA!",
self.parent.statusbar_display_time)
def check_status_save_button(self):
if str(self.ui.chemical_formula.text()) != "":
self.ui.save_button.setEnabled(True)
else:
self.ui.save_button.setEnabled(False)
def cancel(self):
self.parent.material_ui = None
self.close()
def closeEvent(self, c):
self.parent.material_ui = None
self.parent.material_ui_position = self.pos()
if __name__ == "__main__":
app = QApplication(sys.argv)
o_dialog = PeriodicTable()
o_dialog.show()
app.exec_()
|
|
"""
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import os
import sys
from importlib import import_module
from os.path import join, abspath, dirname
import six
AJAXCRAWL_ENABLED = False
AUTOTHROTTLE_ENABLED = False
AUTOTHROTTLE_DEBUG = False
AUTOTHROTTLE_MAX_DELAY = 60.0
AUTOTHROTTLE_START_DELAY = 5.0
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
COMPRESSION_ENABLED = True
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DNSCACHE_SIZE = 10000
DNS_TIMEOUT = 60
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m
DOWNLOAD_WARNSIZE = 32*1024*1024 # 32m
DOWNLOADER = 'scrapy.core.downloader.Downloader'
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
DOWNLOADER_CLIENT_TLS_METHOD = 'TLS' # Use highest TLS/SSL protocol version supported by the platform,
# also allowing negotiation
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,
'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,
'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
try:
EDITOR = os.environ['EDITOR']
except KeyError:
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
else:
EDITOR = 'vi'
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.extensions.corestats.CoreStats': 0,
'scrapy.extensions.telnet.TelnetConsole': 0,
'scrapy.extensions.memusage.MemoryUsage': 0,
'scrapy.extensions.memdebug.MemoryDebugger': 0,
'scrapy.extensions.closespider.CloseSpider': 0,
'scrapy.extensions.feedexport.FeedExporter': 0,
'scrapy.extensions.logstats.LogStats': 0,
'scrapy.extensions.spiderstate.SpiderState': 0,
'scrapy.extensions.throttle.AutoThrottle': 0,
}
FEED_TEMPDIR = None
FEED_URI = None
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_EXPORT_ENCODING = None
FEED_EXPORT_FIELDS = None
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.extensions.feedexport.FileFeedStorage',
'file': 'scrapy.extensions.feedexport.FileFeedStorage',
'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',
's3': 'scrapy.extensions.feedexport.S3FeedStorage',
'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.exporters.JsonItemExporter',
'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',
'jl': 'scrapy.exporters.JsonLinesItemExporter',
'csv': 'scrapy.exporters.CsvItemExporter',
'xml': 'scrapy.exporters.XmlItemExporter',
'marshal': 'scrapy.exporters.MarshalItemExporter',
'pickle': 'scrapy.exporters.PickleItemExporter',
}
FILES_STORE_S3_ACL = 'private'
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_ALWAYS_STORE = False
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []
HTTPCACHE_DBM_MODULE = 'anydbm' if six.PY2 else 'dbm'
HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'
HTTPCACHE_GZIP = False
HTTPPROXY_AUTH_ENCODING = 'latin-1'
IMAGES_STORE_S3_ACL = 'private'
ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'
ITEM_PIPELINES = {}
ITEM_PIPELINES_BASE = {}
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
SCHEDULER_DEBUG = False
LOGSTATS_INTERVAL = 60.0
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0
MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REACTOR_THREADPOOL_MAXSIZE = 10
REDIRECT_ENABLED = True
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 502, 503, 504, 408]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
SCHEDULER_PRIORITY_QUEUE = 'queuelib.PriorityQueue'
SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,
'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,
'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,
# Spider side
}
SPIDER_MODULES = []
STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'
STATS_DUMP = True
STATSMAILER_RCPTS = []
TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
URLLENGTH_LIMIT = 2083
USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '127.0.0.1'
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
'scrapy.contracts.default.UrlContract': 1,
'scrapy.contracts.default.ReturnsContract': 2,
'scrapy.contracts.default.ScrapesContract': 3,
}
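# Example (illustrative only, not part of Scrapy's defaults): a project typically
# overrides a handful of these values in its own settings.py, e.g.
#
#     BOT_NAME = 'mybot'
#     DOWNLOAD_DELAY = 0.5
#     ITEM_PIPELINES = {'myproject.pipelines.MyPipeline': 300}
#
# Anything not overridden falls back to the defaults defined above.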
|
|
"""
Module to set up run time parameters for Clawpack -- classic code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='amrclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "amrclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
# Sample setup to write one line to setprob.data ...
probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
# Gamma EOS parameter for three materials: air, plastic and water
probdata.add_param('gammagas', 1.4, 'gamma for ideal gas')
    probdata.add_param('gammaplas', 1.1, 'gamma est. for polystyrene')
probdata.add_param('gammawat', 7.15, 'gamma for water')
# Pinf parameter for Tammann EOS for three materials: air, plastic and water
    # pinfplas is calculated from c^2 = gamma*(p + pinf)/rho so that c = 2240 m/s (the speed of
    # sound in polystyrene); c = 1484 m/s in water. Water values are taken from Kirsten's paper in the references.
    probdata.add_param('pinfgas', 0.0, 'pinf for stiffened gas (air)')
    probdata.add_param('pinfplas', 4789425947.72, 'pinf for stiffened plastic')
    probdata.add_param('pinfwat', 300000000.0, 'pinf for stiffened water')
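    # Sanity check: with rho = 1050 kg/m^3, c = 2240 m/s, gamma = 1.1 and atmospheric
    # p ~ 101325 Pa, pinf = rho*c**2/gamma - p ~ 4.789e9 Pa, matching pinfplas above.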
# Density at rest and at room temperature of: air, plastic and water
probdata.add_param('rhog', 1.0, 'air density in kg/m^3')
    probdata.add_param('rhop', 1050.0, 'polystyrene density in kg/m^3')
probdata.add_param('rhow', 1000.0, 'water density in kg/m^3')
    # omegas, omeplas and omewat are not used in this code, but they can be used to extend the code
    # to a more general EOS with an extra parameter, like the Van der Waals EOS.
    probdata.add_param('omegas', 0.0, 'omega (specific excluded volume) for stiffened gas')
    probdata.add_param('omeplas', 0.0, 'omega (specific excluded volume) for stiffened plastic')
    probdata.add_param('omewat', 0.0, 'omega (specific excluded volume) for stiffened water')
    # Parameters for the mapped grid (if changed, they also need to be changed in mapc2p.py;
    # also double-check that the mapc2p.py test works correctly with the new parameters)
probdata.add_param('rsqr', 0.039999, 'radius of outer square for mapped grid')
probdata.add_param('rout', 0.015, 'radius of outer circle of ring inclusion for mapped grid')
probdata.add_param('rinn', 0.010, 'radius of inner circle of ring inclusion for mapped grid')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = -0.05 #-0.055 # xlower
clawdata.upper[0] = 0.05 #0.055 # xupper
clawdata.lower[1] = 0.000000e+00 # ylower
clawdata.upper[1] = 0.05000e+00 # yupper
# Number of grid cells:
# For original mapped grid, used multiples of 20x10, so the interface is aligned
clawdata.num_cells[0] = 40 #40 #40 #56-mymapping #40-randys # mx
clawdata.num_cells[1] = 20 #20 #14 #17-mymapping #14-randys # my
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 4
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 15
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 14
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 150
clawdata.tfinal = 0.0002 #0.00025 #0.00015 #0.00015
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 1
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
    clawdata.output_format = 'ascii'      # 'ascii' or 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'all' # could be list
clawdata.output_aux_onlyonce = False # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.000000e-07
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.5 #0.600000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 0.6 #0.700000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 500000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting?
clawdata.dimensional_split = 'unsplit' # 'godunov' #'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 1 #2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter (see inline limiter.f to
# enable or disable modified minmod (TVD, only first order, "add viscosity")
clawdata.limiter = [1, 1, 1] #[1, 1, 1]
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 1
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'user' # at xlower
clawdata.bc_upper[0] = 'extrap' # at xupper
clawdata.bc_lower[1] = 'extrap' # at ylower
clawdata.bc_upper[1] = 'extrap' # at yupper
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
ckouttime0 = 50.00/1000000.0
ckouttime1 = 63.33/1000000.0
clawdata.checkpt_times = [ckouttime0, ckouttime1]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# Gauges:
# ---------------
gauges = rundata.gaugedata.gauges
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
gauges.append([0, -0.01, 0, 0., 1e9])
gauges.append([1, -0.01, 0.005, 0., 1e9])
gauges.append([2, -0.01, 0.01, 0., 1e9])
gauges.append([3, 0.0, 0, 0., 1e9])
gauges.append([4, 0.0, 0.005, 0., 1e9])
gauges.append([5, 0.0, 0.01, 0., 1e9])
gauges.append([6, 0.01, 0, 0., 1e9])
gauges.append([7, 0.01, 0.005, 0., 1e9])
gauges.append([8, 0.01, 0.01, 0., 1e9])
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least amr_level_max-1)
amrdata.refinement_ratios_x = [2, 2, 2, 2, 2]
amrdata.refinement_ratios_y = [2, 2, 2, 2, 2]
amrdata.refinement_ratios_t = [2, 2, 2, 2, 2]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length num_aux, each element of which is one
# of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','center','center','center','center','center','center','center','center','center','center','center','center','capacity','center']
    # Flag for refinement based on Richardson error estimator:
amrdata.flag_richardson = False # use Richardson?
amrdata.flag_richardson_tol = 1.000000e-00 # Richardson tolerance
# Flag for refinement using routine flag2refine:
amrdata.flag2refine = True # use this?
amrdata.flag2refine_tol = 5000.0 #10000.0 #10000.0 #10.000000 #100000.0 #5.000000e-02 # tolerance used in this routine (100000000.000000)
# User can modify flag2refine to change the criterion for flagging.
# Default: check maximum absolute difference of first component of q
# between a cell and each of its neighbors.
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 2
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2 #3
# clustering alg. cutoff for (# flagged pts) / (total # of cells
# refined)
# (closer to 1.0 => more small grids may be needed to cover flagged
# cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# -------------------
# Refinement Regions:
# -------------------
regions = rundata.regiondata.regions
# Remove initial spurious wave from interface coupling by not refining until t_0
t_0 = 1.67/1000000.0
# NOT REQUIRED IF MAPPING NOT PRESENT
# All of the water region (for level 2 with mapping)
#regions.append([2,2,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# All of the water region (for level 3 with mapping)
#regions.append([3,3,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 3 with mapping)
#regions.append([3,3,0,1e9,-0.0155,-0.0145, 0.0, 0.0155])
#regions.append([3,3,0,1e9,0.0145,0.0155, 0.0, 0.0155])
#regions.append([3,3,0,1e9,-0.0155, 0.0155, 0.0145, 0.0155])
# Regions along interface (for level 4 with mapping)
regions.append([4,4,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 5 with mapping)
#regions.append([5,5,0,1e9,-0.0155,0.0155, 0.0, 0.0155])
# Regions along interface (for level 5 with mapping)
#regions.append([5,5,0,1e9,-0.02,0.02, 0.0, 0.02])
# Regions along interface (for level 6 with mapping)
#regions.append([6,6,0,1e9,-0.02,0.02, 0.0, 0.0155])
# Regions along interface (for level 6 without mapping)
#regions.append([5,6,t_0,1e9,-0.025,0.025, 0.0, 0.025])
# Along one corner for Schlieren
##regions.append([6,6,t_0,1e9,-0.02,0.005, 0.005, 0.02])
##regions.append([6,6,t_0,1e9,-0.02,0.02, -0.02, 0.02])
##regions.append([6,6,t_0,1e9,-0.02,0.02, -0.02, 0.02])
##regions.append([5,5,t_0,1e9,-0.01,0.02, 0.01, 0.02])
#regions.append([5,5,t_0,1e9,-0.02,-0.01, 0.01, 0.02])
#OTHER COMBINATIONS
# Interface corners
#regions.append([3,3,0,1e9,-0.0155,-0.0145, 0.0145, 0.0155])
#regions.append([3,3,0,1e9,0.0145,0.0155, 0.0145, 0.0155])
#regions.append([2,3,4e-6,1e9,-0.03,-0.029, 0.0, 0.030])
#regions.append([3,3,0,1e9,-0.02,0.02, 0.0, 0.02])
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
|
import collections as col
import copy
import itertools
import math
import operator
import thalesians.tsa.intervals
# Based on an answer by Gustavo Bezerra on Stack Overflow
# https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
def is_notebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
# Jupyter notebook or qtconsole
return True
elif shell == 'TerminalInteractiveShell':
# Terminal running IPython
return False
else:
# Other type (?)
return False
except NameError:
# Probably a standard Python interpreter
return False
def sequence_eq(sequence1, sequence2):
"""
Compares two sequences.
Parameters
----------
sequence1 : sequence
The first sequence.
sequence2 : sequence
The second sequence.
Returns
-------
bool
`True` iff `sequence1` equals `sequence2`, otherwise `False`.
"""
return len(sequence1) == len(sequence2) and all(map(operator.eq, sequence1, sequence2))
def cmp(x, y):
return (x > y) - (x < y)
def most_common(iterable):
"""
>>> most_common(['foo', 'bar', 'bar', 'foo', 'bar'])
'bar'
>>> most_common(['foo', 'bar', 'bar', 'foo'])
'foo'
>>> most_common(['foo', 'bar'])
'foo'
"""
sorted_iterable = sorted((x, i) for i, x in enumerate(iterable))
groups = itertools.groupby(sorted_iterable, key=operator.itemgetter(0))
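    # _auxfun ranks each group by (count, -first_index), so ties on count are broken
    # in favour of the value that appears first (see the 'foo'/'bar' doctests above).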
def _auxfun(g):
_, it = g
count = 0
min_index = len(iterable)
for _, where in it:
count += 1
min_index = min(min_index, where)
return count, -min_index
return max(groups, key=_auxfun)[0]
def prepend(collection, to_prepend, in_place=False):
if not in_place: collection = copy.copy(collection)
collection[0:0] = to_prepend
return collection
def _pad_on_left_with_callable(collection, new_len, padding=None):
return prepend(collection, [padding() for _ in range(new_len - len(collection))], in_place=True)
def _pad_on_left_with_noncallable(collection, new_len, padding=None):
return prepend(collection, [padding for _ in range(new_len - len(collection))], in_place=True)
def pad_on_left(collection, new_len, padding=None, in_place=False):
if not in_place: collection = copy.copy(collection)
    if callable(padding):
return _pad_on_left_with_callable(collection, new_len, padding)
else:
return _pad_on_left_with_noncallable(collection, new_len, padding)
def _pad_on_right_with_callable(collection, new_len, padding=None):
collection.extend([padding() for _ in range(new_len - len(collection))])
return collection
def _pad_on_right_with_noncallable(collection, new_len, padding=None):
collection.extend([padding for _ in range(new_len - len(collection))])
return collection
def pad_on_right(collection, new_len, padding=None, in_place=False):
if not in_place: collection = copy.copy(collection)
    if callable(padding):
return _pad_on_right_with_callable(collection, new_len, padding)
else:
return _pad_on_right_with_noncallable(collection, new_len, padding)
def trim_on_left(collection, new_len, in_place=False):
if not in_place: collection = copy.copy(collection)
del collection[:max(len(collection) - new_len, 0)]
return collection
def trim_on_right(collection, new_len, in_place=False):
if not in_place: collection = copy.copy(collection)
del collection[new_len:]
return collection
def xconst(value):
while True: yield value
def xbatch(size, iterable):
"""
>>> list(xbatch(2, range(10)))
[range(0, 2), range(2, 4), range(4, 6), range(6, 8), range(8, 10)]
>>> list(xbatch(3, ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']))
[['Jan', 'Feb', 'Mar'], ['Apr', 'May', 'Jun'], ['Jul', 'Aug', 'Sep'], ['Oct', 'Nov', 'Dec']]
>>> list(xbatch(3, ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')))
[('Jan', 'Feb', 'Mar'), ('Apr', 'May', 'Jun'), ('Jul', 'Aug', 'Sep'), ('Oct', 'Nov', 'Dec')]
>>> import numpy as np
>>> list(xbatch(2, np.array(range(10))))
[array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7]), array([8, 9])]
"""
l = len(iterable)
for i in range(0, l, size):
yield iterable[i:min(i + size, l)]
def batch(size, iterable):
"""
>>> batch(2, range(10))
[range(0, 2), range(2, 4), range(4, 6), range(6, 8), range(8, 10)]
>>> batch(3, [429, 5, 2, 14, 42, 132, 1, 1])
[[429, 5, 2], [14, 42, 132], [1, 1]]
>>> batch(4, range(10))
[range(0, 4), range(4, 8), range(8, 10)]
"""
return list(xbatch(size, iterable))
def peek(iterable, size=1):
"""
>>> it = xbatch(2, range(10))
>>> first_three, new_it = peek(it, 3)
>>> first_three
[range(0, 2), range(2, 4), range(4, 6)]
>>> list(new_it)
[range(0, 2), range(2, 4), range(4, 6), range(6, 8), range(8, 10)]
>>> list(it)
[]
>>> it = xbatch(2, range(10))
>>> first_three, new_it = peek(it, 3)
>>> first_three
[range(0, 2), range(2, 4), range(4, 6)]
>>> list(it)
[range(6, 8), range(8, 10)]
"""
objs = []
for _ in range(size):
try:
obj = next(iterable)
except StopIteration:
break
objs.append(obj)
return objs, itertools.chain(objs, iterable)
def intervals(start, end, delta, intervals_right_closed=False):
"""
>>> intervals(start=0, end=15, delta=5, intervals_right_closed=False)
[[0, 5), [5, 10), [10, 15)]
>>> intervals(start=0, end=15, delta=5, intervals_right_closed=True)
[(0, 5], (5, 10], (10, 15]]
>>> intervals(start=0, end=15, delta=4, intervals_right_closed=False)
[[0, 4), [4, 8), [8, 12), [12, 15)]
>>> intervals(start=0, end=15, delta=4, intervals_right_closed=True)
[(0, 4], (4, 8], (8, 12], (12, 15]]
>>> import datetime as dt
>>> intervals(start=dt.date(2019, 8, 31), end=dt.date(2019, 9, 15), delta=dt.timedelta(days=5), intervals_right_closed=False)
[[2019-08-31, 2019-09-05), [2019-09-05, 2019-09-10), [2019-09-10, 2019-09-15)]
>>> intervals(start=dt.date(2019, 8, 31), end=dt.date(2019, 9, 15), delta=dt.timedelta(days=5), intervals_right_closed=True)
[(2019-08-31, 2019-09-05], (2019-09-05, 2019-09-10], (2019-09-10, 2019-09-15]]
>>> intervals(start=dt.date(2019, 8, 31), end=dt.date(2019, 9, 15), delta=dt.timedelta(days=4), intervals_right_closed=False)
[[2019-08-31, 2019-09-04), [2019-09-04, 2019-09-08), [2019-09-08, 2019-09-12), [2019-09-12, 2019-09-15)]
>>> intervals(start=dt.date(2019, 8, 31), end=dt.date(2019, 9, 15), delta=dt.timedelta(days=4), intervals_right_closed=True)
[(2019-08-31, 2019-09-04], (2019-09-04, 2019-09-08], (2019-09-08, 2019-09-12], (2019-09-12, 2019-09-15]]
>>> intervals(start=dt.datetime(2019, 10, 8, 0), end=dt.datetime(2019, 10, 8, 15), delta=dt.timedelta(hours=5), intervals_right_closed=False)
[[2019-10-08 00:00:00, 2019-10-08 05:00:00), [2019-10-08 05:00:00, 2019-10-08 10:00:00), [2019-10-08 10:00:00, 2019-10-08 15:00:00)]
>>> intervals(start=dt.datetime(2019, 10, 8, 0), end=dt.datetime(2019, 10, 8, 15), delta=dt.timedelta(hours=5), intervals_right_closed=True)
[(2019-10-08 00:00:00, 2019-10-08 05:00:00], (2019-10-08 05:00:00, 2019-10-08 10:00:00], (2019-10-08 10:00:00, 2019-10-08 15:00:00]]
>>> intervals(start=dt.datetime(2019, 10, 8, 0), end=dt.datetime(2019, 10, 8, 15), delta=dt.timedelta(hours=4), intervals_right_closed=False)
[[2019-10-08 00:00:00, 2019-10-08 04:00:00), [2019-10-08 04:00:00, 2019-10-08 08:00:00), [2019-10-08 08:00:00, 2019-10-08 12:00:00), [2019-10-08 12:00:00, 2019-10-08 15:00:00)]
>>> intervals(start=dt.datetime(2019, 10, 8, 0), end=dt.datetime(2019, 10, 8, 15), delta=dt.timedelta(hours=4), intervals_right_closed=True)
[(2019-10-08 00:00:00, 2019-10-08 04:00:00], (2019-10-08 04:00:00, 2019-10-08 08:00:00], (2019-10-08 08:00:00, 2019-10-08 12:00:00], (2019-10-08 12:00:00, 2019-10-08 15:00:00]]
"""
result = []
interval_start, interval_end = None, None
while True:
interval_start = start if interval_end is None else interval_end
interval_end = min(end, interval_start + delta)
result.append(thalesians.tsa.intervals.Interval(
interval_start, interval_end,
not intervals_right_closed, intervals_right_closed))
if interval_end == end: break
return result
class Bracket(object):
def __init__(self, interval, interval_offset):
self.interval = interval
self.interval_offset = interval_offset
self._str_Bracket = None
def __eq__(self, other):
return self.interval == other.interval and self.interval_offset == other.interval_offset
def __str__(self):
if self._str_Bracket is None:
self._str_Bracket = '{' + str(self.interval) + ', ' + str(self.interval_offset) + '}'
return self._str_Bracket
def __repr__(self):
return str(self)
def bracket(iterable, origin, interval_size, already_sorted=False, intervals_right_closed=False, coalesce=False):
"""
>>> data = [8, 11, 12, 13, 14, 27, 29, 37, 49, 50, 51, 79, 85]
>>> brackets, bracket_indices = bracket(data, 3, 5)
>>> brackets
[{[8, 13), 1}, {[13, 18), 2}, {[23, 28), 4}, {[28, 33), 5}, {[33, 38), 6}, {[48, 53), 9}, {[78, 83), 15}, {[83, 88), 16}]
>>> bracket_indices
[0, 0, 0, 1, 1, 2, 3, 4, 5, 5, 5, 6, 7]
>>> brackets, bracket_indices = bracket(data, 3, 5, intervals_right_closed=True)
>>> brackets
[{(3, 8], 0}, {(8, 13], 1}, {(13, 18], 2}, {(23, 28], 4}, {(28, 33], 5}, {(33, 38], 6}, {(48, 53], 9}, {(78, 83], 15}, {(83, 88], 16}]
>>> bracket_indices
[0, 1, 1, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8]
>>> brackets, bracket_indices = bracket(data, 3, 5, coalesce=True)
>>> brackets
[{[8, 18), 1}, {[23, 38), 4}, {[48, 53), 9}, {[78, 88), 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
>>> brackets, bracket_indices = bracket(data, 3, 5, intervals_right_closed=True, coalesce=True)
>>> brackets
[{(3, 18], 0}, {(23, 38], 4}, {(48, 53], 9}, {(78, 88], 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
>>> import datetime as dt
>>> data = [dt.date(2017, 1, 31) + dt.timedelta(days=x) for x in [8, 11, 12, 13, 14, 27, 29, 37, 49, 50, 51, 79, 85]];
>>> brackets, bracket_indices = bracket(data, dt.date(2017, 2, 3), dt.timedelta(days=5))
>>> brackets
[{[2017-02-08, 2017-02-13), 1}, {[2017-02-13, 2017-02-18), 2}, {[2017-02-23, 2017-02-28), 4}, {[2017-02-28, 2017-03-05), 5}, {[2017-03-05, 2017-03-10), 6}, {[2017-03-20, 2017-03-25), 9}, {[2017-04-19, 2017-04-24), 15}, {[2017-04-24, 2017-04-29), 16}]
>>> bracket_indices
[0, 0, 0, 1, 1, 2, 3, 4, 5, 5, 5, 6, 7]
>>> brackets, bracket_indices = bracket(data, dt.date(2017, 2, 3), dt.timedelta(days=5), intervals_right_closed=True)
>>> brackets
[{(2017-02-03, 2017-02-08], 0}, {(2017-02-08, 2017-02-13], 1}, {(2017-02-13, 2017-02-18], 2}, {(2017-02-23, 2017-02-28], 4}, {(2017-02-28, 2017-03-05], 5}, {(2017-03-05, 2017-03-10], 6}, {(2017-03-20, 2017-03-25], 9}, {(2017-04-19, 2017-04-24], 15}, {(2017-04-24, 2017-04-29], 16}]
>>> bracket_indices
[0, 1, 1, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8]
>>> brackets, bracket_indices = bracket(data, dt.date(2017, 2, 3), dt.timedelta(days=5), coalesce=True)
>>> brackets
[{[2017-02-08, 2017-02-18), 1}, {[2017-02-23, 2017-03-10), 4}, {[2017-03-20, 2017-03-25), 9}, {[2017-04-19, 2017-04-29), 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
>>> brackets, bracket_indices = bracket(data, dt.date(2017, 2, 3), dt.timedelta(days=5), intervals_right_closed=True, coalesce=True)
>>> brackets
[{(2017-02-03, 2017-02-18], 0}, {(2017-02-23, 2017-03-10], 4}, {(2017-03-20, 2017-03-25], 9}, {(2017-04-19, 2017-04-29], 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
>>> data = [dt.datetime(2017, 1, 31, 0, 0, 0) + dt.timedelta(minutes=x) for x in [8, 11, 12, 13, 14, 27, 29, 37, 49, 50, 51, 79, 85]];
>>> brackets, bracket_indices = bracket(data, dt.datetime(2017, 1, 31, 0, 3, 0), dt.timedelta(minutes=5))
>>> brackets
[{[2017-01-31 00:08:00, 2017-01-31 00:13:00), 1}, {[2017-01-31 00:13:00, 2017-01-31 00:18:00), 2}, {[2017-01-31 00:23:00, 2017-01-31 00:28:00), 4}, {[2017-01-31 00:28:00, 2017-01-31 00:33:00), 5}, {[2017-01-31 00:33:00, 2017-01-31 00:38:00), 6}, {[2017-01-31 00:48:00, 2017-01-31 00:53:00), 9}, {[2017-01-31 01:18:00, 2017-01-31 01:23:00), 15}, {[2017-01-31 01:23:00, 2017-01-31 01:28:00), 16}]
>>> bracket_indices
[0, 0, 0, 1, 1, 2, 3, 4, 5, 5, 5, 6, 7]
>>> brackets, bracket_indices = bracket(data, dt.datetime(2017, 1, 31, 0, 3, 0), dt.timedelta(minutes=5), intervals_right_closed=True)
>>> brackets
[{(2017-01-31 00:03:00, 2017-01-31 00:08:00], 0}, {(2017-01-31 00:08:00, 2017-01-31 00:13:00], 1}, {(2017-01-31 00:13:00, 2017-01-31 00:18:00], 2}, {(2017-01-31 00:23:00, 2017-01-31 00:28:00], 4}, {(2017-01-31 00:28:00, 2017-01-31 00:33:00], 5}, {(2017-01-31 00:33:00, 2017-01-31 00:38:00], 6}, {(2017-01-31 00:48:00, 2017-01-31 00:53:00], 9}, {(2017-01-31 01:18:00, 2017-01-31 01:23:00], 15}, {(2017-01-31 01:23:00, 2017-01-31 01:28:00], 16}]
>>> bracket_indices
[0, 1, 1, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8]
>>> brackets, bracket_indices = bracket(data, dt.datetime(2017, 1, 31, 0, 3, 0), dt.timedelta(minutes=5), coalesce=True)
>>> brackets
[{[2017-01-31 00:08:00, 2017-01-31 00:18:00), 1}, {[2017-01-31 00:23:00, 2017-01-31 00:38:00), 4}, {[2017-01-31 00:48:00, 2017-01-31 00:53:00), 9}, {[2017-01-31 01:18:00, 2017-01-31 01:28:00), 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
>>> brackets, bracket_indices = bracket(data, dt.datetime(2017, 1, 31, 0, 3, 0), dt.timedelta(minutes=5), intervals_right_closed=True, coalesce=True)
>>> brackets
[{(2017-01-31 00:03:00, 2017-01-31 00:18:00], 0}, {(2017-01-31 00:23:00, 2017-01-31 00:38:00], 4}, {(2017-01-31 00:48:00, 2017-01-31 00:53:00], 9}, {(2017-01-31 01:18:00, 2017-01-31 01:28:00], 15}]
>>> bracket_indices
[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
"""
if not already_sorted:
sorted_indices, iterable = zip(*sorted([(i, v) for i, v in enumerate(iterable)], key=operator.itemgetter(1)))
brackets = []
bracket_indices = []
interval_offset = None
interval_left = None
interval_right = None
for x in iterable:
if interval_offset is None or x - interval_left >= interval_size:
new_interval_offset = (x - origin) // interval_size
new_interval_left = origin + new_interval_offset * interval_size
if intervals_right_closed and x == new_interval_left:
new_interval_offset -= 1
new_interval_left -= interval_size
if coalesce and (interval_offset is not None) and (new_interval_left <= brackets[-1].interval.right):
interval_right = new_interval_left + interval_size
brackets[-1].interval = brackets[-1].interval.replace_right(interval_right)
elif interval_offset is None or new_interval_offset != interval_offset:
interval_offset = new_interval_offset
interval_left = new_interval_left
interval_right = interval_left + interval_size
brackets.append(
Bracket(thalesians.tsa.intervals.Interval(interval_left,
interval_right,
not intervals_right_closed,
intervals_right_closed),
interval_offset))
bracket_indices.append(len(brackets) - 1)
if not already_sorted:
new_bracket_indices = [None] * len(bracket_indices)
for i in range(len(bracket_indices)):
new_bracket_indices[sorted_indices[i]] = bracket_indices[i]
bracket_indices = new_bracket_indices
return brackets, bracket_indices
class FlatStoredArray(object):
def __init__(self, *args):
self.__count = self._getcount(*args)
self._data = [None] * self.__count
    def _getcount(self, *args):
raise NotImplementedError('Pure virtual method')
def _keytoindex(self, key):
raise NotImplementedError('Pure virtual method')
def _indextokey(self, index):
raise NotImplementedError('Pure virtual method')
def __getitem__(self, key):
return self._data[self._keytoindex(key)]
def __setitem__(self, key, value):
self._data[self._keytoindex(key)] = value
def __len__(self):
return self.__count
def __str__(self):
return str(self._data)
def __repr__(self):
return repr(self._data)
def setall(self, iterable):
for i, v in enumerate(iterable):
if i >= self.__count: break
self._data[i] = v
class __Iterator(object):
def __init__(self, data):
self._data = data
self.__idx = 0
def __iter__(self):
return self
def __next__(self):
if self.__idx < len(self._data):
v = self._data[self.__idx]
self.__idx += 1
return v
raise StopIteration()
def __iter__(self):
return FlatStoredArray.__Iterator(self._data)
class __KeysIterator(object):
def __init__(self, collection):
self.__collection = collection
self.__idx = 0
def __iter__(self):
return self
def __next__(self):
if self.__idx < len(self.__collection):
k = self.__collection._indextokey(self.__idx)
self.__idx += 1
return k
raise StopIteration()
def keys(self):
return FlatStoredArray.__KeysIterator(self)
class __ItemsIterator(object):
def __init__(self, data, collection):
self.__data = data
self.__collection = collection
self.__idx = 0
def __iter__(self):
return self
def __next__(self):
if self.__idx < len(self.__data):
k = self.__collection._indextokey(self.__idx)
v = self.__data[self.__idx]
self.__idx += 1
return k, v
raise StopIteration()
def items(self):
return FlatStoredArray.__ItemsIterator(self._data, self)
class DiagonalArray(FlatStoredArray):
"""
>>> a = DiagonalArray(5)
>>> a[0,0] = 0
>>> a[1,0], a[1,1] = 10, 20
>>> a[2,0], a[2,1], a[2,2] = 30, 40, 50
>>> a[3,0], a[3,1], a[3,2], a[3,3] = 60, 70, 80, 90
>>> a[4,0], a[4,1], a[4,2], a[4,3], a[4,4] = 100, 110, 120, 130, 140
>>> len(a)
15
>>> a[0,0]
0
>>> a[1,0]
10
>>> a[1,1]
20
>>> a[2,0]
30
>>> a[2,1]
40
>>> a[2,2]
50
>>> a[3,0]
60
>>> a[3,1]
70
>>> a[3,2]
80
>>> a[3,3]
90
>>> a[4,0]
100
>>> a[4,1]
110
>>> a[4,2]
120
>>> a[4,3]
130
>>> a[4,4]
140
>>> a[0,0]
0
>>> a[0,1]
10
>>> a[1,1]
20
>>> a[0,2]
30
>>> a[1,2]
40
>>> a[2,2]
50
>>> a[0,3]
60
>>> a[1,3]
70
>>> a[2,3]
80
>>> a[3,3]
90
>>> a[0,4]
100
>>> a[1,4]
110
>>> a[2,4]
120
>>> a[3,4]
130
>>> a[4,4]
140
>>> a._indextokey(0)
(0, 0)
>>> a._indextokey(1)
(1, 0)
>>> a._indextokey(2)
(1, 1)
>>> a._indextokey(3)
(2, 0)
>>> a._indextokey(4)
(2, 1)
>>> a._indextokey(5)
(2, 2)
>>> a._indextokey(6)
(3, 0)
>>> a._indextokey(7)
(3, 1)
>>> a._indextokey(8)
(3, 2)
>>> a._indextokey(9)
(3, 3)
>>> a._indextokey(10)
(4, 0)
>>> a._indextokey(11)
(4, 1)
>>> a._indextokey(12)
(4, 2)
>>> a._indextokey(13)
(4, 3)
>>> a._indextokey(14)
(4, 4)
>>> values = []
>>> for v in a: values.append(v)
>>> tuple(a)
(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140)
>>> keys = []
>>> for k in a.keys(): keys.append(k)
>>> keys
[(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2), (3, 3), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4)]
>>> keys, values = [], []
>>> for k, v in a.items():
... keys.append(k)
... values.append(v)
>>> keys
[(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2), (3, 3), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4)]
>>> values
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140]
>>> a.mindim(1)
1
>>> a.mindim(2)
2
>>> a.mindim(3)
2
>>> a.mindim(4)
3
>>> a.mindim(5)
3
>>> a.mindim(6)
3
>>> a.mindim(7)
4
>>> a.mindim(8)
4
>>> a.mindim(9)
4
>>> a.mindim(10)
4
>>> a.mindim(11)
5
>>> a.mindim(12)
5
>>> a.mindim(13)
5
>>> a.mindim(14)
5
>>> a.mindim(15)
5
"""
def __init__(self, dim):
super(DiagonalArray, self).__init__(dim)
self.__dim = dim
@property
def dim(self): return self.__dim
@classmethod
def _getcount(cls, dim):
return (dim*dim + dim) // 2
@classmethod
def _keytoindex(cls, key):
i, j = key[0], key[1]
if i < j: i, j = j, i
return (i*i + i) // 2 + j
    @classmethod
    def _indextokey(cls, index):
        # Inverse of _keytoindex: index = i*(i + 1)//2 + j with 0 <= j <= i,
        # so i is recovered from the triangular-number bound and corrected
        # downwards if the initial estimate overshoots.
        i = int(math.sqrt(2*index))
        n = (i*i + i) // 2
        j = index - n
        if j < 0:
            i -= 1
            n = (i*i + i) // 2
            j = index - n
        return i, j
@classmethod
def mindim(cls, count):
dim = int(math.sqrt(2*count))
if cls._getcount(dim) < count:
dim += 1
return dim
@classmethod
def create(cls, obj):
if isinstance(obj, DiagonalArray):
res = DiagonalArray(obj.dim)
res.setall(obj)
elif isinstance(obj, SubdiagonalArray):
res = DiagonalArray(obj.dim)
for k, v in obj.items():
                res[k] = v
else:
res = DiagonalArray(cls.mindim(len(obj)))
res.setall(obj)
return res
def tonumpyarray(self, fill=None, symmetric=False):
import numpy as np
if fill is None: fill = np.NAN
res = np.empty((self.__dim, self.__dim))
idx = 0
for i in range(self.__dim):
for j in range(i+1):
res[i,j] = self._data[idx]
if symmetric: res[j,i] = res[i,j]
idx += 1
if not symmetric: res[i,i+1:self.__dim] = fill
return res
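# Illustration (not covered by the doctests above): for the DiagonalArray 'a'
# built in the class docstring, a.tonumpyarray(symmetric=True) returns a 5x5
# numpy array whose lower triangle (diagonal included) holds the stored values
# and whose upper triangle mirrors them; with symmetric=False the upper
# triangle is set to 'fill' (NaN by default).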
class SubdiagonalArray(FlatStoredArray):
"""
>>> a = SubdiagonalArray(5)
>>> a[1,0] = 0
>>> a[2,0], a[2,1] = 10, 20
>>> a[3,0], a[3,1], a[3,2] = 30, 40, 50
>>> a[4,0], a[4,1], a[4,2], a[4,3] = 60, 70, 80, 90
>>> len(a)
10
>>> a[1,0]
0
>>> a[2,0]
10
>>> a[2,1]
20
>>> a[3,0]
30
>>> a[3,1]
40
>>> a[3,2]
50
>>> a[4,0]
60
>>> a[4,1]
70
>>> a[4,2]
80
>>> a[4,3]
90
>>> a[0,1]
0
>>> a[0,2]
10
>>> a[1,2]
20
>>> a[0,3]
30
>>> a[1,3]
40
>>> a[2,3]
50
>>> a[0,4]
60
>>> a[1,4]
70
>>> a[2,4]
80
>>> a[3,4]
90
>>> a._indextokey(0)
(1, 0)
>>> a._indextokey(1)
(2, 0)
>>> a._indextokey(2)
(2, 1)
>>> a._indextokey(3)
(3, 0)
>>> a._indextokey(4)
(3, 1)
>>> a._indextokey(5)
(3, 2)
>>> a._indextokey(6)
(4, 0)
>>> a._indextokey(7)
(4, 1)
>>> a._indextokey(8)
(4, 2)
>>> a._indextokey(9)
(4, 3)
>>> values = []
>>> for v in a: values.append(v)
>>> values
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
>>> keys = []
>>> for k in a.keys(): keys.append(k)
>>> keys
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2), (4, 0), (4, 1), (4, 2), (4, 3)]
>>> keys, values = [], []
>>> for k, v in a.items():
... keys.append(k)
... values.append(v)
>>> keys
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2), (4, 0), (4, 1), (4, 2), (4, 3)]
>>> values
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
>>> a.mindim(1)
2
>>> a.mindim(2)
3
>>> a.mindim(3)
3
>>> a.mindim(4)
4
>>> a.mindim(5)
4
>>> a.mindim(6)
4
>>> a.mindim(7)
5
>>> a.mindim(8)
5
>>> a.mindim(9)
5
>>> a.mindim(10)
5
"""
def __init__(self, dim):
super(SubdiagonalArray, self).__init__(dim)
self.__dim = dim
@property
def dim(self): return self.__dim
@classmethod
def _getcount(cls, dim):
return (dim*dim - dim) // 2
@classmethod
def _keytoindex(cls, key):
i, j = key[0], key[1]
if i < j: i, j = j, i
return (i*i - i) // 2 + j
@classmethod
def _indextokey(cls, index):
i = int(math.sqrt(2*index)) + 1
n = (i*i - i) // 2
j = index - n
if j < 0:
i -= 1
n = (i*i - i) // 2
j = index - n
return i, j
@classmethod
def mindim(cls, count):
dim = int(math.sqrt(2*count)) + 1
if cls._getcount(dim) < count:
dim += 1
return dim
@classmethod
def create(cls, obj):
if isinstance(obj, SubdiagonalArray):
res = SubdiagonalArray(obj.dim)
res.setall(obj)
elif isinstance(obj, DiagonalArray):
res = SubdiagonalArray(obj.dim)
for k, v in obj.items():
                if k[0] != k[1]: res[k] = v
else:
res = SubdiagonalArray(cls.mindim(len(obj)))
res.setall(obj)
return res
def tonumpyarray(self, fill=None, symmetric=False):
import numpy as np
if fill is None: fill = np.NAN
res = np.empty((self.__dim, self.__dim))
idx = 0
for i in range(self.__dim):
for j in range(i):
res[i,j] = self._data[idx]
if symmetric: res[j,i] = res[i,j]
idx += 1
res[i,i] = fill
if not symmetric: res[i,i+1:self.__dim] = fill
return res
def _test():
import doctest
doctest.testmod(verbose=True)
if __name__ == '__main__':
_test()
|
|
import logging
import ast
import re
import requests
from teuthology.util.compat import urljoin, urlencode
from collections import OrderedDict
from teuthology.util.compat import PY3
if PY3:
from io import StringIO
else:
from io import BytesIO as StringIO
from teuthology import repo_utils
from teuthology.config import config
from teuthology.contextutil import safe_while
from teuthology.exceptions import (VersionNotFoundError, CommitNotFoundError,
NoRemoteError)
from teuthology.misc import sudo_write_file
from teuthology.orchestra.opsys import OS, DEFAULT_OS_VERSION
from teuthology.orchestra.run import Raw
log = logging.getLogger(__name__)
'''
Map 'generic' package name to 'flavor-specific' package name.
If entry is None, either the package isn't known here, or
it's known but should not be installed on remotes of this flavor
'''
_PACKAGE_MAP = {
'sqlite': {'deb': 'sqlite3', 'rpm': None}
}
'''
Map 'generic' service name to 'flavor-specific' service name.
'''
_SERVICE_MAP = {
'httpd': {'deb': 'apache2', 'rpm': 'httpd'}
}
def get_package_name(pkg, rem):
"""
Find the remote-specific name of the generic 'pkg'
"""
flavor = rem.os.package_type
try:
return _PACKAGE_MAP[pkg][flavor]
except KeyError:
return None
def get_service_name(service, rem):
"""
Find the remote-specific name of the generic 'service'
"""
flavor = rem.os.package_type
try:
return _SERVICE_MAP[service][flavor]
except KeyError:
return None
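# Illustrative usage (hypothetical 'deb_remote'/'rpm_remote' objects; the
# results follow from the maps above):
#
#     get_package_name('sqlite', deb_remote)   # -> 'sqlite3'
#     get_package_name('sqlite', rpm_remote)   # -> None (do not install)
#     get_package_name('unknown', deb_remote)  # -> None (not in _PACKAGE_MAP)
#     get_service_name('httpd', deb_remote)    # -> 'apache2'
#     get_service_name('httpd', rpm_remote)    # -> 'httpd'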
def install_package(package, remote):
"""
Install 'package' on 'remote'
Assumes repo has already been set up (perhaps with install_repo)
"""
log.info('Installing package %s on %s', package, remote)
flavor = remote.os.package_type
if flavor == 'deb':
pkgcmd = ['DEBIAN_FRONTEND=noninteractive',
'sudo',
'-E',
'apt-get',
'-y',
'--force-yes',
'install',
'{package}'.format(package=package)]
elif flavor == 'rpm':
# FIXME: zypper
pkgcmd = ['sudo',
'yum',
'-y',
'install',
'{package}'.format(package=package)]
else:
log.error('install_package: bad flavor ' + flavor + '\n')
return False
return remote.run(args=pkgcmd)
def remove_package(package, remote):
"""
Remove package from remote
"""
flavor = remote.os.package_type
if flavor == 'deb':
pkgcmd = ['DEBIAN_FRONTEND=noninteractive',
'sudo',
'-E',
'apt-get',
'-y',
'purge',
'{package}'.format(package=package)]
elif flavor == 'rpm':
# FIXME: zypper
pkgcmd = ['sudo',
'yum',
'-y',
'erase',
'{package}'.format(package=package)]
else:
log.error('remove_package: bad flavor ' + flavor + '\n')
return False
return remote.run(args=pkgcmd)
def get_koji_task_result(task_id, remote, ctx):
"""
Queries kojihub and retrieves information about
the given task_id. The package, koji, must be installed
on the remote for this command to work.
We need a remote here because koji can only be installed
on rpm based machines and teuthology runs on Ubuntu.
The results of the given task are returned. For example:
{
'brootid': 3303567,
'srpms': [],
'rpms': [
'tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm',
'tasks/6745/9666745/kernel-modules-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm',
],
'logs': []
}
:param task_id: The koji task_id we want to retrieve results for.
:param remote: The remote to run the koji command on.
:param ctx: The ctx from the current run, used to provide a
failure_reason and status if the koji command fails.
:returns: A python dict containing info about the task results.
"""
py_cmd = ('import koji; '
'hub = koji.ClientSession("{kojihub_url}"); '
'print(hub.getTaskResult({task_id}))')
py_cmd = py_cmd.format(
task_id=task_id,
kojihub_url=config.kojihub_url
)
log.info("Querying kojihub for the result of task {0}".format(task_id))
task_result = _run_python_command(py_cmd, remote, ctx)
return task_result
def get_koji_task_rpm_info(package, task_rpms):
"""
Extracts information about a given package from the provided
rpm results of a koji task.
For example, if trying to retrieve the package 'kernel' from
the results of a task, the output would look like this:
{
'base_url': 'https://kojipkgs.fedoraproject.org/work/tasks/6745/9666745/',
'rpm_name': 'kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm',
'package_name': 'kernel',
'version': '4.1.0-0.rc2.git2.1.fc23.x86_64',
}
    :param task_rpms: A list of rpms from a task's results.
:param package: The name of the package to retrieve.
:returns: A python dict containing info about the package.
"""
result = dict()
result['package_name'] = package
found_pkg = _find_koji_task_result(package, task_rpms)
if not found_pkg:
raise RuntimeError("The package {pkg} was not found in: {rpms}".format(
pkg=package,
rpms=task_rpms,
))
path, rpm_name = found_pkg.rsplit("/", 1)
result['rpm_name'] = rpm_name
result['base_url'] = "{koji_task_url}/{path}/".format(
koji_task_url=config.koji_task_url,
path=path,
)
# removes the package name from the beginning of rpm_name
version = rpm_name.split("{0}-".format(package), 1)[1]
# removes .rpm from the rpm_name
version = version.split(".rpm")[0]
result['version'] = version
return result
def _find_koji_task_result(package, rpm_list):
"""
Looks in the list of rpms from koji task results to see if
the package we are looking for is present.
Returns the full list item, including the path, if found.
If not found, returns None.
"""
for rpm in rpm_list:
if package == _get_koji_task_result_package_name(rpm):
return rpm
return None
def _get_koji_task_result_package_name(path):
"""
Strips the package name from a koji rpm result.
This makes the assumption that rpm names are in the following
    format: <package_name>-<version>-<release>.<arch>.rpm
    For example, a koji rpm result might look like:
tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm
This method would return "kernel".
"""
filename = path.split('/')[-1]
trimmed = []
for part in filename.split('-'):
# assumes that when the next part is not a digit
# we're past the name and at the version
if part[0].isdigit():
return '-'.join(trimmed)
trimmed.append(part)
return '-'.join(trimmed)
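# Worked example (values taken from the docstrings above): for the rpm path
#     tasks/6745/9666745/kernel-4.1.0-0.rc2.git2.1.fc23.x86_64.rpm
#
#     _get_koji_task_result_package_name(path)            # -> 'kernel'
#     get_koji_task_rpm_info('kernel', [path])['version']
#                                       # -> '4.1.0-0.rc2.git2.1.fc23.x86_64'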
def get_koji_build_info(build_id, remote, ctx):
"""
Queries kojihub and retrieves information about
the given build_id. The package, koji, must be installed
on the remote for this command to work.
We need a remote here because koji can only be installed
on rpm based machines and teuthology runs on Ubuntu.
Here is an example of the build info returned:
{'owner_name': 'kdreyer', 'package_name': 'ceph',
'task_id': 8534149, 'completion_ts': 1421278726.1171,
'creation_event_id': 10486804, 'creation_time': '2015-01-14 18:15:17.003134',
'epoch': None, 'nvr': 'ceph-0.80.5-4.el7ost', 'name': 'ceph',
'completion_time': '2015-01-14 18:38:46.1171', 'state': 1, 'version': '0.80.5',
'volume_name': 'DEFAULT', 'release': '4.el7ost', 'creation_ts': 1421277317.00313,
'package_id': 34590, 'id': 412677, 'volume_id': 0, 'owner_id': 2826
}
:param build_id: The koji build_id we want to retrieve info on.
:param remote: The remote to run the koji command on.
:param ctx: The ctx from the current run, used to provide a
failure_reason and status if the koji command fails.
:returns: A python dict containing info about the build.
"""
py_cmd = ('import koji; '
'hub = koji.ClientSession("{kojihub_url}"); '
'print(hub.getBuild({build_id}))')
py_cmd = py_cmd.format(
build_id=build_id,
kojihub_url=config.kojihub_url
)
log.info('Querying kojihub for info on build {0}'.format(build_id))
build_info = _run_python_command(py_cmd, remote, ctx)
return build_info
def _run_python_command(py_cmd, remote, ctx):
"""
Runs the given python code on the remote
and returns the stdout from the code as
a python object.
"""
proc = remote.run(
args=[
'python', '-c', py_cmd
],
stdout=StringIO(), stderr=StringIO(), check_status=False
)
if proc.exitstatus == 0:
# returns the __repr__ of a python dict
stdout = proc.stdout.getvalue().strip()
        # takes the __repr__ and makes it a python dict again
result = ast.literal_eval(stdout)
else:
msg = "Error running the following on {0}: {1}".format(remote, py_cmd)
log.error(msg)
log.error("stdout: {0}".format(proc.stdout.getvalue().strip()))
log.error("stderr: {0}".format(proc.stderr.getvalue().strip()))
ctx.summary["failure_reason"] = msg
ctx.summary["status"] = "dead"
raise RuntimeError(msg)
return result
def get_kojiroot_base_url(build_info, arch="x86_64"):
"""
Builds the base download url for kojiroot given the current
build information.
:param build_info: A dict of koji build information, possibly
retrieved from get_koji_build_info.
:param arch: The arch you want to download rpms for.
:returns: The base_url to use when downloading rpms
from brew.
"""
base_url = "{kojiroot}/{package_name}/{ver}/{rel}/{arch}/".format(
kojiroot=config.kojiroot_url,
package_name=build_info["package_name"],
ver=build_info["version"],
rel=build_info["release"],
arch=arch,
)
return base_url
def get_koji_package_name(package, build_info, arch="x86_64"):
"""
Builds the package name for a brew rpm.
:param package: The name of the package
:param build_info: A dict of koji build information, possibly
retrieved from get_brew_build_info.
:param arch: The arch you want to download rpms for.
:returns: A string representing the file name for the
requested package in koji.
"""
pkg_name = "{name}-{ver}-{rel}.{arch}.rpm".format(
name=package,
ver=build_info["version"],
rel=build_info["release"],
arch=arch,
)
return pkg_name
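# Worked example (using the sample build_info shown in the get_koji_build_info
# docstring above, i.e. nvr 'ceph-0.80.5-4.el7ost'):
#
#     get_kojiroot_base_url(build_info)
#     # -> "<config.kojiroot_url>/ceph/0.80.5/4.el7ost/x86_64/"
#     get_koji_package_name('ceph', build_info)
#     # -> "ceph-0.80.5-4.el7ost.x86_64.rpm"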
def get_package_version(remote, package):
installed_ver = None
if remote.os.package_type == "deb":
proc = remote.run(
args=[
'dpkg-query', '-W', '-f', '${Version}', package
],
stdout=StringIO(),
)
else:
proc = remote.run(
args=[
'rpm', '-q', package, '--qf', '%{VERSION}-%{RELEASE}'
],
stdout=StringIO(),
)
if proc.exitstatus == 0:
installed_ver = proc.stdout.getvalue().strip()
# Does this look like a version string?
# this assumes a version string starts with non-alpha characters
if installed_ver and re.match('^[^a-zA-Z]', installed_ver):
log.info("The installed version of {pkg} is {ver}".format(
pkg=package,
ver=installed_ver,
))
else:
installed_ver = None
else:
# should this throw an exception and stop the job?
log.warning(
"Unable to determine if {pkg} is installed: {stdout}".format(
pkg=package,
stdout=proc.stdout.getvalue().strip(),
)
)
return installed_ver
def _get_config_value_for_remote(ctx, remote, config, key):
"""
Look through config, and attempt to determine the "best" value to use
for a given key. For example, given::
config = {
'all':
{'branch': 'master'},
'branch': 'next'
}
_get_config_value_for_remote(ctx, remote, config, 'branch')
would return 'master'.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:param key: the name of the value to retrieve
"""
roles = ctx.cluster.remotes[remote] if ctx else None
if 'all' in config:
return config['all'].get(key)
elif roles:
for role in roles:
if role in config and key in config[role]:
return config[role].get(key)
return config.get(key)
def _get_response(url, wait=False, sleep=15, tries=10):
with safe_while(sleep=sleep, tries=tries, _raise=False) as proceed:
while proceed():
resp = requests.get(url)
if resp.ok:
log.info('Package found...')
break
if not wait:
log.info(
'Package is not found at: %s (got HTTP code %s)...',
url,
resp.status_code,
)
break
log.info(
'Package not there yet (got HTTP code %s), waiting...',
resp.status_code,
)
return resp
class GitbuilderProject(object):
"""
Represents a project that is built by gitbuilder.
"""
# gitbuilder always uses this value
rpm_release = "1-0"
def __init__(self, project, job_config, ctx=None, remote=None):
self.project = project
self.job_config = job_config
#TODO: we could get around the need for ctx by using a list
# of roles instead, ctx is only used in _get_config_value_for_remote.
self.ctx = ctx
self.remote = remote
if remote and ctx:
self._init_from_remote()
else:
self._init_from_config()
self.dist_release = self._get_dist_release()
def _init_from_remote(self):
"""
Initializes the class from a teuthology.orchestra.remote.Remote object
"""
self.arch = self.remote.arch
self.os_type = self.remote.os.name
self.os_version = self.remote.os.version
self.codename = self.remote.os.codename
self.pkg_type = self.remote.system_type
self.distro = self._get_distro(
distro=self.remote.os.name,
version=self.remote.os.version,
codename=self.remote.os.codename,
)
# when we're initializing with a remote we most likely have
# a task config, not the entire teuthology job config
self.flavor = self.job_config.get("flavor", "basic")
self.tag = self.job_config.get("tag")
def _init_from_config(self):
"""
Initializes the class from a teuthology job config
"""
self.arch = self.job_config.get('arch', 'x86_64')
self.os_type = self.job_config.get("os_type")
self.flavor = self.job_config.get("flavor")
self.codename = self.job_config.get("codename")
self.os_version = self._get_version()
# if os_version is given, prefer version/codename derived from it
if self.os_version:
self.os_version, self.codename = \
OS.version_codename(self.os_type, self.os_version)
self.branch = self.job_config.get("branch")
self.tag = self.job_config.get("tag")
self.ref = self.job_config.get("ref")
self.distro = self._get_distro(
distro=self.os_type,
version=self.os_version,
codename=self.codename,
)
self.pkg_type = "deb" if self.os_type.lower() in (
"ubuntu",
"debian",
) else "rpm"
if not getattr(self, 'flavor'):
# avoiding circular imports
from teuthology.suite.util import get_install_task_flavor
# when we're initializing from a full teuthology config, not just a
# task config we need to make sure we're looking at the flavor for
# the install task
self.flavor = get_install_task_flavor(self.job_config)
@property
def sha1(self):
"""
Performs a call to gitbuilder to retrieve the sha1 if not provided in
the job_config. The returned value is cached so that this call only
happens once.
:returns: The sha1 of the project as a string.
"""
if not hasattr(self, "_sha1"):
self._sha1 = self.job_config.get('sha1')
if not self._sha1:
self._sha1 = self._get_package_sha1()
return self._sha1
@property
def version(self):
"""
        Performs a call to gitbuilder to retrieve the version number for the
project. The returned value is cached so that this call only happens
once.
:returns: The version number of the project as a string.
"""
if not hasattr(self, '_version'):
self._version = self._get_package_version()
return self._version
@property
def base_url(self):
"""
The base url that points at this project on gitbuilder.
For example::
http://gitbuilder.ceph.com/ceph-deb-raring-x86_64-basic/ref/master
:returns: A string of the base url for this project
"""
return self._get_base_url()
@property
def uri_reference(self):
"""
The URI reference that identifies what build of the project
we'd like to use.
For example, the following could be returned::
ref/<branch>
sha1/<sha1>
ref/<tag>
:returns: The uri_reference as a string.
"""
return self._get_uri_reference()
def _get_dist_release(self):
version = self._parse_version(self.os_version)
if self.os_type in ('centos', 'rhel'):
return "el{0}".format(version)
elif self.os_type == "fedora":
return "fc{0}".format(version)
else:
# debian and ubuntu just use the distro name
return self.os_type
@staticmethod
def _parse_version(version):
"""
Parses a distro version string and returns a modified string
that matches the format needed for the gitbuilder url.
Minor version numbers are ignored.
"""
return version.split(".")[0]
@classmethod
def _get_distro(cls, distro=None, version=None, codename=None):
"""
        Given a distro and a version, return the combined string
to use in a gitbuilder url.
:param distro: The distro as a string
:param version: The version as a string
:param codename: The codename for the distro.
Used for deb based distros.
"""
if distro in ('centos', 'rhel'):
distro = "centos"
elif distro == "fedora":
distro = "fedora"
elif distro == "opensuse":
distro = "opensuse"
elif distro == "sle":
distro == "sle"
else:
# deb based systems use codename instead of a distro/version combo
if not codename:
# lookup codename based on distro string
codename = OS._version_to_codename(distro, version)
if not codename:
msg = "No codename found for: {distro} {version}".format(
distro=distro,
version=version,
)
log.exception(msg)
                    raise RuntimeError(msg)
return codename
return "{distro}{version}".format(
distro=distro,
version=cls._parse_version(version),
)
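    # Illustrative results of _get_distro (these follow from the logic above):
    #
    #     _get_distro('centos', '7.0')              # -> 'centos7'
    #     _get_distro('rhel', '7.0')                # -> 'centos7'
    #     _get_distro('fedora', '23')               # -> 'fedora23'
    #     _get_distro('ubuntu', '14.04', 'trusty')  # -> 'trusty'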
def _get_version(self):
"""
Attempts to find the distro version from the job_config.
If not found, it will return the default version for
the distro found in job_config.
:returns: A string distro version
"""
version = self.job_config.get("os_version")
if not version:
version = DEFAULT_OS_VERSION.get(self.os_type)
return str(version)
def _get_uri_reference(self):
"""
Returns the URI reference that identifies what build of the project
we'd like to use.
If a remote is given, it will attempt to read the config for the given
remote to find either a tag, branch or sha1 defined. If there is no
remote, the sha1 from the config will be used.
If a tag, branch or sha1 can't be found it will default to use the
build from the master branch.
:returns: A string URI. Ex: ref/master
"""
ref_name, ref_val = next(iter(self._choose_reference().items()))
if ref_name == 'sha1':
return 'sha1/%s' % ref_val
else:
return 'ref/%s' % ref_val
def _choose_reference(self):
"""
Since it's only meaningful to search for one of:
ref, tag, branch, sha1
Decide which to use.
:returns: a single-key dict containing the name and value of the
reference to use, e.g. {'branch': 'master'}
"""
tag = branch = sha1 = None
if self.remote:
tag = _get_config_value_for_remote(self.ctx, self.remote,
self.job_config, 'tag')
branch = _get_config_value_for_remote(self.ctx, self.remote,
self.job_config, 'branch')
sha1 = _get_config_value_for_remote(self.ctx, self.remote,
self.job_config, 'sha1')
ref = None
else:
ref = self.ref
tag = self.tag
branch = self.branch
sha1 = self.sha1
def warn(attrname):
names = ('ref', 'tag', 'branch', 'sha1')
vars = (ref, tag, branch, sha1)
            # count how many of the reference values are actually set
if sum(1 for _ in vars if _) > 1:
log.warning(
"More than one of ref, tag, branch, or sha1 supplied; "
"using %s",
attrname
)
for n, v in zip(names, vars):
log.info('%s: %s' % (n, v))
if ref:
warn('ref')
return dict(ref=ref)
elif tag:
warn('tag')
return dict(tag=tag)
elif branch:
warn('branch')
return dict(branch=branch)
elif sha1:
warn('sha1')
return dict(sha1=sha1)
else:
log.warning("defaulting to master branch")
return dict(branch='master')
def _get_base_url(self):
"""
Figures out which package repo base URL to use.
"""
template = config.baseurl_template
# get distro name and arch
base_url = template.format(
host=config.gitbuilder_host,
proj=self.project,
pkg_type=self.pkg_type,
arch=self.arch,
dist=self.distro,
flavor=self.flavor,
uri=self.uri_reference,
)
return base_url
def _get_package_version(self):
"""
Look for, and parse, a file called 'version' in base_url.
"""
url = "{0}/version".format(self.base_url)
log.info("Looking for package version: {0}".format(url))
# will loop and retry until a 200 is returned or the retry
# limits are reached
resp = _get_response(url, wait=self.job_config.get("wait_for_package", False))
if not resp.ok:
raise VersionNotFoundError(url)
version = resp.text.strip().lstrip('v')
log.info("Found version: {0}".format(version))
return version
def _get_package_sha1(self):
"""
Look for, and parse, a file called 'sha1' in base_url.
"""
url = "{0}/sha1".format(self.base_url)
log.info("Looking for package sha1: {0}".format(url))
resp = requests.get(url)
sha1 = None
if not resp.ok:
# TODO: maybe we should have this retry a few times?
log.error(
'Package sha1 was not there (got HTTP code %s)...',
resp.status_code,
)
else:
sha1 = resp.text.strip()
log.info("Found sha1: {0}".format(sha1))
return sha1
def install_repo(self):
"""
Install the .repo file or sources.list fragment on self.remote if there
is one. If not, raises an exception
"""
if not self.remote:
raise NoRemoteError()
if self.remote.os.package_type == 'rpm':
self._install_rpm_repo()
elif self.remote.os.package_type == 'deb':
self._install_deb_repo()
def _install_rpm_repo(self):
dist_release = self.dist_release
project = self.project
proj_release = \
'{proj}-release-{release}.{dist_release}.noarch'.format(
proj=project, release=self.rpm_release,
dist_release=dist_release
)
rpm_name = "{rpm_nm}.rpm".format(rpm_nm=proj_release)
url = "{base_url}/noarch/{rpm_name}".format(
base_url=self.base_url, rpm_name=rpm_name)
if dist_release in ['opensuse', 'sle']:
url = "{base_url}/{arch}".format(
base_url=self.base_url, arch=self.arch)
self.remote.run(args=[
'sudo', 'zypper', '-n', 'addrepo', '--refresh', '--no-gpgcheck',
'-p', '1', url, 'ceph-rpm-under-test',
])
else:
self.remote.run(args=['sudo', 'yum', '-y', 'install', url])
def _install_deb_repo(self):
self.remote.run(
args=[
'echo', 'deb', self.base_url, self.codename, 'main',
Raw('|'),
'sudo', 'tee',
'/etc/apt/sources.list.d/{proj}.list'.format(
proj=self.project),
],
stdout=StringIO(),
)
def remove_repo(self):
"""
Remove the .repo file or sources.list fragment on self.remote if there
is one. If not, raises an exception
"""
if not self.remote:
raise NoRemoteError()
if self.remote.os.package_type == 'rpm':
self._remove_rpm_repo()
elif self.remote.os.package_type == 'deb':
self._remove_deb_repo()
def _remove_rpm_repo(self):
if self.dist_release in ['opensuse', 'sle']:
self.remote.run(args=[
'sudo', 'zypper', '-n', 'removerepo', 'ceph-rpm-under-test'
])
else:
remove_package('%s-release' % self.project, self.remote)
def _remove_deb_repo(self):
self.remote.run(
args=[
'sudo',
'rm', '-f',
'/etc/apt/sources.list.d/{proj}.list'.format(
proj=self.project),
]
)
class ShamanProject(GitbuilderProject):
def __init__(self, project, job_config, ctx=None, remote=None):
super(ShamanProject, self).__init__(project, job_config, ctx, remote)
self.query_url = 'https://%s/api/' % config.shaman_host
        # Force use of "noarch" instead of the host arch when building the URI.
self.force_noarch = self.job_config.get("shaman", {}).get("force_noarch", False)
def _get_base_url(self):
self.assert_result()
return self._result.json()[0]['url']
@property
def _result(self):
if getattr(self, '_result_obj', None) is None:
self._result_obj = self._search()
return self._result_obj
def _search(self):
uri = self._search_uri
log.debug("Querying %s", uri)
resp = requests.get(
uri,
headers={'content-type': 'application/json'},
)
resp.raise_for_status()
return resp
@property
def _search_uri(self):
flavor = self.flavor
if flavor == 'basic':
flavor = 'default'
req_obj = OrderedDict()
req_obj['status'] = 'ready'
req_obj['project'] = self.project
req_obj['flavor'] = flavor
arch = "noarch" if self.force_noarch else self.arch
req_obj['distros'] = '%s/%s' % (self.distro, arch)
ref_name, ref_val = list(self._choose_reference().items())[0]
if ref_name == 'tag':
req_obj['sha1'] = self._sha1 = self._tag_to_sha1()
elif ref_name == 'sha1':
req_obj['sha1'] = ref_val
else:
req_obj['ref'] = ref_val
req_str = urlencode(req_obj)
uri = urljoin(
self.query_url,
'search',
) + '?%s' % req_str
return uri
def _tag_to_sha1(self):
"""
Shaman doesn't know about tags. Use git ls-remote to query the remote
repo in order to map tags to their sha1 value.
This method will also retry against ceph.git if the original request
uses ceph-ci.git and fails.
"""
def get_sha1(url):
# Ceph (and other projects) uses annotated tags for releases. This
# has the side-effect of making git ls-remote return the sha1 for
# the annotated tag object and not the last "real" commit in that
# tag. By contrast, when a person (or a build system) issues a
# "git checkout <tag>" command, HEAD will be the last "real" commit
# and not the tag.
# Below we have to append "^{}" to the tag value to work around
# this in order to query for the sha1 that the build system uses.
return repo_utils.ls_remote(url, "%s^{}" % self.tag)
git_url = repo_utils.build_git_url(self.project)
result = get_sha1(git_url)
# For upgrade tests that are otherwise using ceph-ci.git, we need to
# also look in ceph.git to lookup released tags.
if result is None and 'ceph-ci' in git_url:
alt_git_url = git_url.replace('ceph-ci', 'ceph')
log.info(
"Tag '%s' not found in %s; will also look in %s",
self.tag,
git_url,
alt_git_url,
)
result = get_sha1(alt_git_url)
if result is None:
raise CommitNotFoundError(self.tag, git_url)
return result
def assert_result(self):
if len(self._result.json()) == 0:
raise VersionNotFoundError(self._result.url)
@classmethod
def _get_distro(cls, distro=None, version=None, codename=None):
if distro in ('centos', 'rhel'):
distro = 'centos'
version = cls._parse_version(version)
return "%s/%s" % (distro, version)
def _get_package_sha1(self):
# This doesn't raise because GitbuilderProject._get_package_sha1()
# doesn't either.
if not len(self._result.json()):
log.error("sha1 not found: %s", self._result.url)
else:
return self._result.json()[0]['sha1']
def _get_package_version(self):
self.assert_result()
return self._result.json()[0]['extra']['package_manager_version']
@property
def scm_version(self):
self.assert_result()
return self._result.json()[0]['extra']['version']
@property
def repo_url(self):
self.assert_result()
return urljoin(
self._result.json()[0]['chacra_url'],
'repo',
)
@property
def build_complete(self):
# use the repo search results to get a ref and a sha1; the
# input to teuthology-suite doesn't contain both
try:
self.assert_result()
except VersionNotFoundError:
return False
search_result = self._result.json()[0]
# now look for the build complete status
path = '/'.join(
('builds/ceph', search_result['ref'], search_result['sha1'])
)
build_url = urljoin(self.query_url, path)
try:
resp = requests.get(build_url)
resp.raise_for_status()
        except requests.HTTPError:
return False
for build in resp.json():
if (
build['distro'] == search_result['distro'] and
build['distro_version'] == search_result['distro_version'] and
build['flavor'] == search_result['flavor'] and
build['distro_arch'] in search_result['archs']
):
return build['status'] == 'completed'
return False
def _get_repo(self):
resp = requests.get(self.repo_url)
resp.raise_for_status()
return str(resp.text)
def _install_rpm_repo(self):
dist_release = self.dist_release
repo = self._get_repo()
if dist_release in ['opensuse', 'sle']:
log.info("Writing zypper repo:\n{}".format(repo))
sudo_write_file(
self.remote,
'/etc/zypp/repos.d/{proj}.repo'.format(proj=self.project),
repo,
)
else:
log.info("Writing yum repo:\n{}".format(repo))
sudo_write_file(
self.remote,
'/etc/yum.repos.d/{proj}.repo'.format(proj=self.project),
repo,
)
def _install_deb_repo(self):
repo = self._get_repo()
sudo_write_file(
self.remote,
'/etc/apt/sources.list.d/{proj}.list'.format(
proj=self.project),
repo,
)
def _remove_rpm_repo(self):
# FIXME: zypper
self.remote.run(
args=[
'sudo',
'rm', '-f',
'/etc/yum.repos.d/{proj}.repo'.format(proj=self.project),
]
)
def get_builder_project():
"""
Depending on whether config.use_shaman is True or False, return
GitbuilderProject or ShamanProject (the class, not an instance).
"""
if config.use_shaman is True:
builder_class = ShamanProject
else:
builder_class = GitbuilderProject
return builder_class
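# Typical usage sketch (a sketch only; the real call sites live elsewhere in
# teuthology, and 'job_config', 'ctx' and 'remote' are assumed to come from
# the surrounding task):
#
#     builder = get_builder_project()(
#         'ceph', job_config, ctx=ctx, remote=remote)
#     builder.install_repo()
#     version = builder.version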
|
|
import csv
import os
import shutil
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import ConvexHull
from matplotlib import pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import pylab
from scipy.ndimage.filters import gaussian_filter
def softmax(v):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(v - np.max(v))
return e_x / e_x.sum()
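# Quick sanity check (illustrative only): softmax(np.array([0.0, 0.0])) gives
# array([0.5, 0.5]). Below, softmax turns the per-subset scores into sampling
# probabilities for np.random.choice when new feature subsets are generated.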
loadfile = open('D:/Master Thesis/Dataset/pos_features.csv', 'rb')
reader = csv.reader(loadfile, delimiter=',')
n_data = 0
n_features = 0
names = {}
name = None
line_num = 0
feature_desc = ()
# Get data size and lines for each individual
for line_num, l in enumerate(reader):
if line_num == 0:
feature_desc = l[:-2]
continue
n_data += 1
n_features = len(l) - 2
if name is None:
name = l[n_features].split('_')[0]
names[name] = [line_num - 1, 0]
elif name != l[n_features].split('_')[0]:
names[name][1] = line_num - 1
name = l[n_features].split('_')[0]
names[name] = [line_num - 1, 0]
names[name][1] = line_num - 1
print (names)
dataset = np.zeros([n_data, n_features])
loadfile.seek(0)
for i, l in enumerate(reader):
if i == 0:
continue
    if i > n_data:
break
dataset[i - 1, :] = np.asarray(l[:-2])
# for n_kmeans in range(2, 15):
removed_entries = []
candidates = []
removed_entries.append([[], 0])
candidates.append(set())
for i in range(n_features):
removed_entries.append([[i], 0])
candidates.append(set([i]))
i_removed = 0
while True:
selection = np.array([x not in removed_entries[i_removed][0] for x in range(n_features)], dtype=np.bool)
print(selection)
overall_score = 0
successful_ops = 0
fails = 0
output_dir = 'D:/Master Thesis/Clustering/tmp2'
excluded_string = '-'.join(np.array(feature_desc, dtype=np.str)[np.invert(selection)].tolist())
included_string = '-'.join(np.array(feature_desc, dtype=np.str)[selection].tolist())
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    for name, lines in names.items():
print(name)
start = lines[0]
end = lines[1]
step = 1
if end - start > 5000:
step = int((end - start) / 5000)
data = dataset[start:end:step, selection]
data = StandardScaler().fit_transform(data)
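        # eps selection heuristic (describing the lines below): compute the
        # k-nearest-neighbour distances (k = num_samples), sort and smooth
        # them, then inspect the first and second finite differences of the
        # resulting curve and pick a distance from the region where the curve
        # flattens out (both differences below a small threshold).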
num_samples = 10
nbrs = NearestNeighbors(n_neighbors=num_samples, metric='euclidean', algorithm='ball_tree').fit(data)
distances, _ = nbrs.kneighbors(data)
nn_distances = gaussian_filter(np.sort(distances[:, -1]), sigma=5)
grad_distances = np.gradient(nn_distances)
grad2_distances = np.gradient(grad_distances)
eps = nn_distances[np.argmax(grad_distances[np.logical_and(grad_distances < 0.002, grad2_distances < 0.002)])]
print(eps)
plt.plot(nn_distances, 'r-', np.gradient(nn_distances), 'b-', grad2_distances, 'g-')
plt.show()
db = DBSCAN(eps=eps, min_samples=num_samples).fit(data)
# core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
# core_samples_mask[db.core_sample_indices_] = True
# db = KMeans(n_clusters=n_kmeans, algorithm='elkan').fit(data)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
title_string = 'Clusters: ' + str(n_clusters_)
print('Estimated number of clusters: %d' % n_clusters_)
if n_clusters_ >= 2:
score = metrics.silhouette_score(data, labels, sample_size=5000)
successful_ops += 1
overall_score += score
print("Silhouette Coefficient: %0.3f" % score)
title_string += ', Score: ' + str(score)
else:
fails += 1
title_string += ', Score: NA'
#####################################################
# PLOTTING #
#####################################################
fig, ax = plt.subplots(figsize=(8, 7), dpi=100)
pca = PCA(n_components=2)
pca.fit(data)
data = pca.transform(data)
patches = []
for l in range(n_clusters_):
cluster = data[labels == l, :]
try:
hull = ConvexHull(cluster)
patches.append(Polygon(cluster[hull.vertices, :]))
            except Exception:
                # degenerate clusters (e.g. collinear points) have no convex hull
                pass
# plt.plot(cluster[hull.vertices, 0], cluster[hull.vertices, 1], 'r-', lw=1)
# plt.plot(cluster[hull.vertices[-1:0], 0], cluster[hull.vertices[-1:0], 1], 'r-', lw=1)
p = PatchCollection(patches, cmap=matplotlib.cm.rainbow, alpha=0.4)
ax.add_collection(p)
ax.set_facecolor('black')
plt.scatter(data[labels != -1, 0], data[labels != -1, 1], c=labels[labels != -1], s=1, cmap='rainbow')
ax.set_title(name + '\n' + 'Excluded: ' + excluded_string + '\n' + title_string)
pylab.savefig('{}/{}.png'.format(output_dir, name), bbox_inches='tight')
plt.close('all')
# plt.show()
if fails > 5:
overall_score = -1
else:
overall_score /= successful_ops
removed_entries[i_removed][1] = overall_score
    print('{}, {}'.format(i_removed, removed_entries[i_removed][:]))
i_removed += 1
if i_removed == len(removed_entries):
scores = np.array([removed_entries[i][1] for i in range(len(removed_entries))])
new_item = set()
while new_item in candidates or len(new_item) > n_features - 2:
s = np.random.choice(len(removed_entries), size=2, replace=False, p=softmax(scores))
new_item = set(removed_entries[s[0]][0] + removed_entries[s[1]][0])
removed_entries.append([list(new_item), 0])
candidates.append(new_item)
        move_dir = 'D:/Master Thesis/Clustering/dbschan_eps_{}_samples_10/{}-include-{}-exclude-{}'.format(
            str(eps).replace('.', '_'),
            str(overall_score).replace('.', '_'),
            included_string,
            excluded_string)
if os.path.exists(move_dir):
shutil.rmtree(move_dir)
shutil.move(output_dir, move_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if len(removed_entries) == 50:
break
|
|
"""Binary for decoding from translation models based on tensorflow/models/rnn/translate/translate.py.
Note that this decoder is greedy and very basic. For a better decoder, see http://ucam-smt.github.io/sgnmt/html/tutorial.html
which supports decoding from tensorflow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle
import sys
import numpy as np
import datetime
import logging
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
if __name__ == "__main__":
from tensorflow.models.rnn.translate.train import FLAGS as train_flags # get default model settings from train.py
from tensorflow.models.rnn.translate.utils import data_utils, model_utils
# Decoder settings
tf.app.flags.DEFINE_string("test_src_idx", "/tmp/in.txt", "An integer-encoded input file")
tf.app.flags.DEFINE_string("test_out_idx", "/tmp/out.txt", "Output file for decoder output")
tf.app.flags.DEFINE_string("output_hidden", "/tmp/hidden", "Output file for hidden state")
tf.app.flags.DEFINE_integer("max_sentences", 0, "The maximum number of sentences to translate (all if set to 0)")
tf.app.flags.DEFINE_string("decode_hidden", None, "Decode from hidden layers in file")
tf.app.flags.DEFINE_boolean("interactive", False, "Decode from command line")
tf.app.flags.DEFINE_string("decode_interpolate_hidden", None, "Decode from hidden layers interpolating between true and generated hidden vectors")
FLAGS = tf.app.flags.FLAGS
def decode(config, input=None, output=None, max_sentences=0):
if input and output:
inp = input
out = output
else:
inp = config['test_src_idx']
out = config['test_out_idx']
if 'output_hidden' in config:
hidden = config['output_hidden']
else:
hidden = '/tmp/hidden'
max_sents = 0
if 'max_sentences' in config:
        max_sents = config['max_sentences']
if max_sentences > 0:
max_sents = max_sentences
if 'decode_hidden' in config and config['decode_hidden']:
unpickle_hidden(config, out, max_sentences=max_sents)
elif 'decode_interpolate_hidden' in config and config['decode_interpolate_hidden']:
decode_interpolate_hidden(config, out, max_sentences=max_sents)
else:
# Find longest input to create suitable bucket
max_input_length = 0
with open(inp) as f_in:
for sentence in f_in:
token_ids = [ int(tok) for tok in sentence.strip().split() ]
if config['add_src_eos']:
token_ids.append(data_utils.EOS_ID)
if len(token_ids) > max_input_length:
max_input_length = len(token_ids)
buckets = list(model_utils._buckets)
logging.info("Decoder buckets: {}".format(buckets))
max_bucket = buckets[len(buckets) - 1][0]
if max_input_length > max_bucket:
bucket = model_utils.make_bucket(max_input_length, greedy_decoder=True, max_trg_len=config['max_target_length'])
buckets.append(bucket)
logging.info("Add new bucket={}".format(bucket))
with tf.Session() as session:
# Create model and load parameters: uses the training graph for decoding
config['batch_size'] = 1 # We decode one sentence at a time.
model = model_utils.create_model(session, config, forward_only=True,
buckets=buckets)
# Decode input file
num_sentences = 0
logging.info("Start decoding, max_sentences=%i" % max_sents)
with open(inp) as f_in, open(out, 'w') as f_out, open(hidden, 'wb') as f_hidden:
pickler = cPickle.Pickler(f_hidden)
for sentence in f_in:
outputs, states = get_outputs(session, config, model, sentence, buckets)
logging.info("Output: {}".format(outputs))
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
print(" ".join([str(tok) for tok in outputs]), file=f_out)
pickler.dump({'states': states, 'length': len(outputs)})
num_sentences += 1
if max_sents > 0 and num_sentences >= max_sents:
break
logging.info("Decoding completed.")
def unpickle_hidden(config, out, max_sentences=0):
hidden_list = []
with open(config['decode_hidden'], 'rb') as f_in:
unpickler = cPickle.Unpickler(f_in)
hidden_size = config['hidden_size']
        while max_sentences == 0 or len(hidden_list) < max_sentences:
try:
hidden = np.array(unpickler.load()['states'])
if config['seq2seq_mode'] == 'autoencoder':
hidden = hidden.reshape(1, 2*hidden_size) # batch_size, BiRNN size
elif config['seq2seq_mode'] == 'vae':
hidden = hidden.reshape(1, config['latent_size'])
hidden_list.append(hidden)
            except EOFError:
break
with tf.Session() as session:
config['batch_size'] = 1 # We decode one sentence at a time.
model = model_utils.create_model(session, config, forward_only=True, hidden=True)
decode_hidden(session, model, config, out, hidden_list)
def decode_interpolate_hidden(config, out, max_sentences=0):
hidden_list = []
num_decoded = 0
with tf.Session() as session:
config['batch_size'] = 1 # We decode one sentence at a time.
model = model_utils.create_model(session, config, forward_only=True, hidden=True)
if model.seq2seq_mode == 'autoencoder':
resize_dim = config['hidden_size']
if config['use_lstm']:
resize_dim *= 2
elif model.seq2seq_mode == 'vae':
resize_dim = config['latent_size']
with open(config['decode_interpolate_hidden'], 'rb') as f_in:
label_samples = cPickle.load(f_in)
for label in label_samples:
log_msg(config['test_out_idx'],
'Gaussian mixture component: {}\n'.format(label))
for interp_list in label_samples[label]:
log_msg(config['test_out_idx'], 'New interpolation set\n')
for i in range(0, len(interp_list)):
interp_list[i] = interp_list[i].reshape(1, resize_dim)
decode_hidden(session, model, config, out, interp_list, append=True)
num_decoded += 1
if num_decoded > max_sentences and max_sentences > 0:
break
def log_msg(f_name, msg):
with open(f_name, 'a') as f_out:
f_out.write(msg)
def decode_hidden(session, model, config, out, hidden_list, append=False):
if append:
mode = 'a'
else:
mode = 'w'
with open(config['test_out_idx'], mode) as f_out:
for hidden in hidden_list:
outputs, states = get_outputs(session, config, model, sentence='', hidden=hidden)
logging.info("Output: {}".format(outputs))
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
print(" ".join([str(tok) for tok in outputs]), file=f_out)
def decode_interactive(config):
with tf.Session() as session:
# Create model and load parameters: uses the training graph for decoding
config['batch_size'] = 1 # We decode one sentence at a time.
model = model_utils.create_model(session, config, forward_only=True)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
outputs, states = get_outputs(session, config, model, sentence)
print("Output: %s" % " ".join([str(tok) for tok in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def get_outputs(session, config, model, sentence, buckets=None, hidden=None):
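    """Run the model on one sentence (or a provided hidden state) and
    return (outputs, hidden_states).
    """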
# Get token-ids for the input sentence.
token_ids = [ int(tok) for tok in sentence.strip().split() ]
token_ids = [ w if w < config['src_vocab_size'] else data_utils.UNK_ID
for w in token_ids ]
if config['add_src_eos']:
token_ids.append(data_utils.EOS_ID)
if not buckets:
buckets = model_utils._buckets
if hidden is None:
bucket_id = min([b for b in xrange(len(buckets))
if buckets[b][0] >= len(token_ids)])
else:
        bucket_id = len(buckets) - 1  # Decoding from a hidden state: use the largest bucket.
logging.info("Bucket {}".format(buckets[bucket_id]))
logging.info("Input: {}".format(token_ids))
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights, sequence_length, src_mask, trg_mask = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id, config['encoder'])
# Get output logits for the sentence.
_, _, output_logits, hidden_states = model.get_state_step(
session, encoder_inputs,
decoder_inputs,
target_weights, bucket_id,
forward_only=True,
sequence_length=sequence_length,
src_mask=src_mask, trg_mask=trg_mask,
hidden=hidden)
outputs = []
for logit in output_logits:
outputs.append(int(np.argmax(logit, axis=1)))
if outputs[-1] == data_utils.EOS_ID:
break
return outputs, hidden_states
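# Illustrative sketch (not part of the original script): get_outputs() above
# picks the smallest bucket whose source length can hold the tokenized input,
# or the largest bucket when decoding from a hidden state. A standalone,
# hedged version of that selection rule, assuming buckets are
# (source_length, target_length) pairs as in model_utils._buckets; the
# function name is an assumption made for illustration.
def pick_bucket(buckets, input_length, use_largest=False):
    """Return the bucket index used for an input of the given length."""
    if use_largest:
        return len(buckets) - 1
    # Raises ValueError if no bucket is large enough; decode() above avoids
    # this by appending an extra bucket for long inputs.
    return min(b for b in range(len(buckets)) if buckets[b][0] >= input_length)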
def main(_):
config = model_utils.process_args(FLAGS, train=False, greedy_decoder=True)
if FLAGS.interactive:
decode_interactive(config)
else:
decode(config)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
logging.info("Start: {}".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')))
    try:
        tf.app.run()
    finally:
        # tf.app.run() exits via sys.exit() once main() returns, so log the end
        # time from a finally block to make sure it is actually emitted.
        logging.info("End: {}".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')))
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Oppia resource handling (e.g. templates, images)."""
from __future__ import annotations
import os
from core import feconf
from core import python_utils
from core.constants import constants
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import story_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.tests import test_utils
class AssetDevHandlerImageTests(test_utils.GenericTestBase):
IMAGE_UPLOAD_URL_PREFIX = '/createhandler/imageupload'
ASSET_HANDLER_URL_PREFIX = '/assetsdevhandler'
def _get_image_url(self, entity_type, entity_id, filename):
"""Gets the image URL."""
return '%s/%s/%s/assets/image/%s' % (
self.ASSET_HANDLER_URL_PREFIX, entity_type, entity_id, filename)
def setUp(self):
"""Load a demo exploration and register self.EDITOR_EMAIL."""
super(AssetDevHandlerImageTests, self).setUp()
exp_services.delete_demo('0')
self.system_user = user_services.get_system_user()
exp_services.load_demo('0')
rights_manager.release_ownership_of_exploration(
self.system_user, '0')
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
def test_image_upload_with_no_filename_raises_error(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX, {},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),),
expected_status_int=400)
self.assertEqual(
response_dict['error'], 'Missing key in handler args: filename.')
self.logout()
def test_get_image_with_invalid_page_context_raises_error(self):
self.login(self.EDITOR_EMAIL)
        # Only a 404 is raised here due to the try-except block in the
        # controller.
self.get_json(
self._get_image_url('invalid_context', '0', 'filename'),
expected_status_int=404)
self.logout()
def test_image_upload_with_invalid_filename_raises_error(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': '.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),),
expected_status_int=400)
error_msg = (
'Schema validation for \'filename\' failed: Validation'
' failed: is_regex_matched ({\'regex_pattern\': '
'\'\\\\w+[.]\\\\w+\'}) for object .png'
)
self.assertEqual(response_dict['error'], error_msg)
self.logout()
def test_cannot_upload_duplicate_image(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),))
filename = response_dict['filename']
response = self.get_custom_response(
self._get_image_url('exploration', '0', filename), 'image/png')
self.assertEqual(response.body, raw_image)
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),),
expected_status_int=400)
self.assertEqual(
response_dict['error'],
'A file with the name test.png already exists. Please choose a '
'different name.')
def test_image_upload_and_download(self):
"""Test image uploading and downloading."""
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
subtopic = topic_domain.Subtopic.create_default_subtopic(
1, 'Subtopic Title')
story_id = story_services.get_new_story_id()
topic_id = topic_fetchers.get_new_topic_id()
skill_id = skill_services.get_new_skill_id()
self.save_new_story(story_id, admin_id, topic_id)
self.save_new_topic(
topic_id, admin_id, name='Name',
description='Description', canonical_story_ids=[story_id],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[subtopic], next_subtopic_id=2)
self.save_new_skill(skill_id, admin_id, description='Description')
# Page context: Exploration.
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),)
)
filename = response_dict['filename']
self.logout()
response = self.get_custom_response(
self._get_image_url('exploration', '0', filename), 'image/png')
self.assertEqual(response.body, raw_image)
# Page context: Topic.
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/topic/%s' % (self.IMAGE_UPLOAD_URL_PREFIX, topic_id),
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),)
)
filename = response_dict['filename']
self.logout()
response = self.get_custom_response(
self._get_image_url('topic', topic_id, filename), 'image/png')
self.assertEqual(response.body, raw_image)
# Page context: Story.
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb',
encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/story/%s' % (self.IMAGE_UPLOAD_URL_PREFIX, story_id),
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),)
)
filename = response_dict['filename']
self.logout()
response = self.get_custom_response(
self._get_image_url('story', story_id, filename), 'image/png')
self.assertEqual(response.body, raw_image)
# Page context: Skill.
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb',
encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/skill/%s' % (self.IMAGE_UPLOAD_URL_PREFIX, skill_id),
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),)
)
filename = response_dict['filename']
self.logout()
response = self.get_custom_response(
self._get_image_url('skill', skill_id, filename), 'image/png')
self.assertEqual(response.body, raw_image)
# Image context: Question Suggestions.
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'), 'rb',
encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/question_suggestions/%s' % (
self.IMAGE_UPLOAD_URL_PREFIX,
skill_id
),
{'filename': 'test.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),)
)
filename = response_dict['filename']
self.logout()
response = self.get_custom_response(
self._get_image_url('skill', skill_id, filename), 'image/png')
self.assertEqual(response.body, raw_image)
def test_non_matching_extensions_are_detected(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
filename_without_extension = 'test'
supplied_filename = ('%s.jpg' % filename_without_extension)
filename_with_correct_extension = (
'%s.png' % filename_without_extension)
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
# Pass JPG extension even though raw_image data is PNG.
# This test verifies that, when the filename extension differs from what
# the raw data 'appears' to be, the image is rejected.
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': supplied_filename},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', raw_image),)
)
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'],
'Expected a filename ending in .png, received test.jpg')
self.logout()
# Test that neither form of the image is stored.
self.get_json(
self._get_image_url('exploration', '0', supplied_filename),
expected_status_int=404)
self.get_json(
self._get_image_url(
'exploration', '0', filename_with_correct_extension),
expected_status_int=404)
def test_upload_empty_image(self):
"""Test upload of an empty image."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Upload an empty image.
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.png'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', ''),)
)
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(response_dict['error'], 'No image supplied')
self.logout()
def test_upload_bad_image(self):
"""Test upload of a malformed image."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Upload an invalid image.
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.png'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', 'non_image_data'),)
)
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(response_dict['error'], 'Image not recognized')
self.logout()
def test_upload_an_invalid_svg_image(self):
"""Test upload of an invalid SVG image."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Upload an invalid SVG image.
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.svg'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', '<badsvg></badsvg>'),)
)
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'],
'Unsupported tags/attributes found in the SVG:\ntags: '
'[\'badsvg\']\n')
self.logout()
def test_upload_a_large_svg(self):
"""Test upload of an SVG image that exceeds the 100 KB size limit."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Upload an SVG image that exceeds the file size limit of 100 KB.
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.svg'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=((
'image',
'unused_filename',
'<svg><path d="%s" /></svg>' % (
'M150 0 L75 200 L225 200 Z ' * 4000)),)
)
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'], 'Image exceeds file size limit of 100 KB.')
self.logout()
def test_get_invalid_image(self):
"""Test retrieval of invalid images."""
self.get_json(
self._get_image_url('exploration', '0', 'bad_image'),
expected_status_int=404)
def test_bad_filenames_are_detected(self):
# TODO(sll): Add more tests here.
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test/a.png'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', raw_image),),
)
self.assertEqual(response_dict['status_code'], 400)
error_msg = (
'Schema validation for \'filename\' failed: Validation failed: '
'is_regex_matched ({\'regex_pattern\': \'\\\\w+[.]\\\\w+\'}) '
'for object test/a.png')
self.assertIn(error_msg, response_dict['error'])
self.logout()
def test_missing_extensions_are_detected(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', raw_image),),
)
self.assertEqual(response_dict['status_code'], 400)
error_msg = (
'Schema validation for \'filename\' failed: Validation failed: '
'is_regex_matched ({\'regex_pattern\': \'\\\\w+[.]\\\\w+\'}) '
'for object test')
self.assertIn(error_msg, response_dict['error'])
self.logout()
def test_bad_extensions_are_detected(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_image = f.read()
response_dict = self.post_json(
'%s/exploration/0' % self.IMAGE_UPLOAD_URL_PREFIX,
{'filename': 'test.pdf'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('image', 'unused_filename', raw_image),),
)
self.assertEqual(response_dict['status_code'], 400)
self.assertIn(
'Expected a filename ending in .png, received test.pdf',
response_dict['error'])
self.logout()
def test_request_invalid_asset_type(self):
"""Test that requests for invalid asset type is rejected with a 404."""
self.login(self.EDITOR_EMAIL)
self.get_html_response(
'/assetsdevhandler/exploration/0/assets/unknowntype/myfile',
expected_status_int=404)
self.logout()
def test_get_response_with_dev_mode_disabled_returns_404_status(self):
self.login(self.EDITOR_EMAIL)
with self.swap(constants, 'EMULATOR_MODE', False):
self.get_json(
'/assetsdevhandler/exploration/0/assets/image/myfile',
expected_status_int=404)
self.logout()
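# Illustrative sketch (not part of the Oppia test suite): the tests above
# exercise the asset dev handler URL layout
# /assetsdevhandler/<entity_type>/<entity_id>/assets/<asset_type>/<filename>
# (see _get_image_url and test_request_invalid_asset_type). The helper below
# simply spells that layout out for illustration; its name and the
# generalisation over asset_type are assumptions, not Oppia APIs.
def _build_assetsdevhandler_url(entity_type, entity_id, asset_type, filename):
    """Build an asset dev handler URL of the form used in the tests above."""
    return '/assetsdevhandler/%s/%s/assets/%s/%s' % (
        entity_type, entity_id, asset_type, filename)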
class AssetDevHandlerAudioTest(test_utils.GenericTestBase):
"""Test the upload of audio files to GCS."""
TEST_AUDIO_FILE_MP3 = 'cafe.mp3'
TEST_AUDIO_FILE_FLAC = 'cafe.flac'
TEST_AUDIO_FILE_OVER_MAX_LENGTH = 'cafe-over-five-minutes.mp3'
TEST_AUDIO_FILE_MPEG_CONTAINER = 'test-mpeg-container.mp3'
AUDIO_UPLOAD_URL_PREFIX = '/createhandler/audioupload'
def setUp(self):
super(AssetDevHandlerAudioTest, self).setUp()
exp_services.delete_demo('0')
self.system_user = user_services.get_system_user()
exp_services.load_demo('0')
rights_manager.release_ownership_of_exploration(
self.system_user, '0')
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
mock_accepted_audio_extensions = {
'mp3': ['audio/mp3'],
'flac': ['audio/flac']
}
self.accepted_audio_extensions_swap = self.swap(
feconf, 'ACCEPTED_AUDIO_EXTENSIONS',
mock_accepted_audio_extensions)
def test_guest_can_not_upload(self):
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
response = self.post_json(
'%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': self.TEST_AUDIO_FILE_MP3},
csrf_token=csrf_token,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),),
expected_status_int=401
)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_cannot_upload_audio_with_invalid_exp_id(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
self.post_json(
'%s/invalid_exp_id' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': self.TEST_AUDIO_FILE_MP3},
csrf_token=csrf_token,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),),
expected_status_int=404
)
self.logout()
def test_audio_upload(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
self.post_json(
'%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': self.TEST_AUDIO_FILE_MP3},
csrf_token=csrf_token,
upload_files=(
('raw_audio_file', self.TEST_AUDIO_FILE_MP3, raw_audio),)
)
self.logout()
def test_audio_upload_with_non_mp3_file(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, '0'))
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_FLAC),
'rb', encoding=None
) as f:
raw_audio = f.read()
self.assertFalse(fs.isfile('audio/%s' % self.TEST_AUDIO_FILE_FLAC))
with self.accepted_audio_extensions_swap:
self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': self.TEST_AUDIO_FILE_FLAC},
csrf_token=csrf_token,
upload_files=[
('raw_audio_file', self.TEST_AUDIO_FILE_FLAC, raw_audio)]
)
self.assertTrue(fs.isfile('audio/%s' % self.TEST_AUDIO_FILE_FLAC))
self.logout()
def test_detect_non_matching_extensions(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Use an accepted audio extension in mismatched_filename
# that differs from the uploaded file's audio type.
mismatched_filename = 'test.flac'
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
with self.accepted_audio_extensions_swap:
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': mismatched_filename},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=[
('raw_audio_file', mismatched_filename, raw_audio)]
)
self.logout()
self.assertIn(
'Although the filename extension indicates the file is a flac '
'file, it was not recognized as one. Found mime types:',
response_dict['error'])
def test_detect_non_audio_file(self):
"""Test that filenames with extensions that don't match the audio are
detected.
"""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None
) as f:
raw_audio = f.read()
with self.accepted_audio_extensions_swap:
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': self.TEST_AUDIO_FILE_FLAC},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
self.assertEqual(
response_dict['error'], 'Audio not recognized as a flac file')
def test_audio_upload_mpeg_container(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(
feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MPEG_CONTAINER),
'rb', encoding=None
) as f:
raw_audio = f.read()
self.post_json(
'%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': self.TEST_AUDIO_FILE_MPEG_CONTAINER},
csrf_token=csrf_token,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
def test_invalid_extension_is_detected(self):
"""Test that invalid extensions are caught."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
filename_without_extension = 'test'
invalid_extension = 'wav'
supplied_filename = (
'%s.%s' % (filename_without_extension, invalid_extension))
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
response_dict = self.post_json(
'%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': supplied_filename},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'],
'Invalid filename extension: it should have '
'one of the following extensions: %s'
% list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()))
def test_upload_empty_audio(self):
"""Test upload of empty audio."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Upload empty audio.
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': 'test.mp3'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', ''),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(response_dict['error'], 'No audio supplied')
def test_upload_bad_audio(self):
"""Test upload of malformed audio."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': 'test.mp3'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(
('raw_audio_file', 'unused_filename', 'non_audio_data'),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'], 'Audio not recognized as a mp3 file')
def test_missing_extensions_are_detected(self):
"""Test upload of filenames with no extensions are caught."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
missing_extension_filename = 'test'
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
response_dict = self.post_json(
'%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX),
{'filename': missing_extension_filename},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'],
'No filename extension: it should have '
'one of the following extensions: '
'%s' % list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()))
def test_exceed_max_length_detected(self):
"""Test that audio file is less than max playback length."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(
feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_OVER_MAX_LENGTH),
'rb', encoding=None
) as f:
raw_audio = f.read()
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': 'test.mp3'},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertIn(
'Audio files must be under %s seconds in length'
% feconf.MAX_AUDIO_FILE_LENGTH_SEC, response_dict['error'])
def test_non_matching_extensions_are_detected(self):
"""Test that filenames with extensions that don't match the audio are
detected.
"""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Use an accepted audio extension in mismatched_filename
# that differs from the uploaded file's audio type.
mismatched_filename = 'test.mp3'
with python_utils.open_file(
os.path.join(
feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_FLAC),
'rb', encoding=None
) as f:
raw_audio = f.read()
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': mismatched_filename},
csrf_token=csrf_token,
expected_status_int=400,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
self.assertEqual(response_dict['status_code'], 400)
self.assertEqual(
response_dict['error'], 'Audio not recognized as a mp3 file')
def test_upload_check_for_duration_sec_as_response(self):
"""Tests the file upload and trying to confirm the
audio file duration_secs is accurate.
"""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3),
'rb', encoding=None
) as f:
raw_audio = f.read()
response_dict = self.post_json(
'%s/0' % self.AUDIO_UPLOAD_URL_PREFIX,
{'filename': self.TEST_AUDIO_FILE_MP3},
csrf_token=csrf_token,
expected_status_int=200,
upload_files=(('raw_audio_file', 'unused_filename', raw_audio),)
)
self.logout()
expected_value = ({
'filename': self.TEST_AUDIO_FILE_MP3,
'duration_secs': 15.255510204081633})
self.assertEqual(response_dict, expected_value)
class PromoBarHandlerTest(test_utils.GenericTestBase):
"""Test for the PromoBarHandler."""
def setUp(self):
super(PromoBarHandlerTest, self).setUp()
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.signup(
self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME)
self.add_user_role(
self.RELEASE_COORDINATOR_USERNAME,
feconf.ROLE_ID_RELEASE_COORDINATOR)
def test_get_promo_bar_data(self):
response = self.get_json('/promo_bar_handler')
self.assertEqual(
response, {
'promo_bar_enabled': False,
'promo_bar_message': ''
})
def test_release_coordinator_able_to_update_promo_bar_config(self):
self.login(self.RELEASE_COORDINATOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'/promo_bar_handler', {
'promo_bar_enabled': True,
'promo_bar_message': 'New promo bar message.'
}, csrf_token=csrf_token)
self.assertEqual(response, {})
response = self.get_json('/promo_bar_handler')
self.assertEqual(
response, {
'promo_bar_enabled': True,
'promo_bar_message': 'New promo bar message.'
})
self.logout()
|
|
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import functools
import hashlib
import inspect
import os
import re
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import timeutils
from toscaparser import tosca_template
from tacker.common import driver_manager
from tacker.common import exceptions
from tacker.common import log
from tacker.common import safe_utils
from tacker.common import utils
from tacker.conductor.conductorrpc import vnf_lcm_rpc
from tacker import manager
from tacker import objects
from tacker.objects import fields
from tacker.objects.fields import ErrorPoint as EP
from tacker.vnflcm import abstract_driver
from tacker.vnflcm import utils as vnflcm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_VNFLCM_MGMT_DRIVER = "vnflcm_noop"
@utils.expects_func_args('vnf_info', 'vnf_instance', 'scale_vnf_request')
def revert_to_error_scale(function):
"""Decorator to revert task_state to error on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
try:
vnf_info = keyed_args['vnf_info']
vnf_instance = keyed_args['vnf_instance']
scale_vnf_request = keyed_args['scale_vnf_request']
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = \
objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
if vnf_info.get('resource_changes'):
resource_changes = vnf_info.get('resource_changes')
else:
resource_changes = self._scale_resource_update(context,
vnf_info,
vnf_instance,
scale_vnf_request,
vim_connection_info,
error=True)
except Exception as e:
LOG.warning(traceback.format_exc())
LOG.warning("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
self._vnfm_plugin._update_vnf_scaling_status_err(context,
vnf_info)
except Exception as e:
LOG.warning("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
vnf_instance.task_state = None
self._vnf_instance_update(context, vnf_instance)
except Exception as e:
LOG.warning("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
problem = objects.ProblemDetails(status=500,
detail=str(ex))
try:
timestamp = datetime.utcnow()
vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ']
vnf_lcm_op_occ.operation_state = 'FAILED_TEMP'
vnf_lcm_op_occ.state_entered_time = timestamp
vnf_lcm_op_occ.resource_changes = resource_changes
vnf_lcm_op_occ.error = problem
vnf_lcm_op_occ.error_point = \
vnf_info['current_error_point']
vnf_lcm_op_occ.save()
except Exception as e:
LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
notification = vnf_info['notification']
notification['notificationStatus'] = 'RESULT'
notification['operationState'] = 'FAILED_TEMP'
notification['error'] = problem.to_dict()
resource_dict = resource_changes.to_dict()
if resource_dict.get('affected_vnfcs'):
notification['affectedVnfcs'] =\
jsonutils.dump_as_bytes(
resource_dict.get('affected_vnfcs'))
if resource_dict.get('affected_virtual_links'):
notification['affectedVirtualLinks'] =\
jsonutils.dump_as_bytes(
resource_dict.get('affected_virtual_links'))
if resource_dict.get('affected_virtual_storages'):
notification['affectedVirtualStorages'] =\
jsonutils.dump_as_bytes(
resource_dict.get('affected_virtual_storages'))
self.rpc_api.send_notification(context, notification)
except Exception as e:
LOG.warning("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
return decorated_function
@utils.expects_func_args('vnf_instance')
def revert_to_error_task_state(function):
"""Decorator to revert task_state to error on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
vnf_instance = keyed_args['vnf_instance']
previous_task_state = vnf_instance.task_state
try:
self._vnf_instance_update(context, vnf_instance,
task_state=fields.VnfInstanceTaskState.ERROR)
LOG.info("Successfully reverted task state from "
"%(state)s to %(error)s on failure for vnf "
"instance %(id)s.",
{"state": previous_task_state,
"id": vnf_instance.id,
"error": fields.VnfInstanceTaskState.ERROR})
except Exception as e:
LOG.warning("Failed to revert task state for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
return decorated_function
@utils.expects_func_args('vnf_info', 'vnf_instance', 'operation_params')
def revert_to_error_rollback(function):
"""Decorator to revert task_state to error on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
resource_changes = None
try:
vnf_info = keyed_args['vnf_info']
vnf_instance = keyed_args['vnf_instance']
operation_params = keyed_args['operation_params']
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info =\
objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
if vnf_info.get('resource_changes'):
resource_changes = vnf_info.get('resource_changes')
else:
if vnf_lcm_op_occs.operation == 'SCALE':
scale_vnf_request =\
objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
scale_vnf_request_copy = \
copy.deepcopy(scale_vnf_request)
scale_vnf_request_copy.type = 'SCALE_IN'
resource_changes = self._scale_resource_update(
context,
vnf_info,
vnf_instance,
scale_vnf_request_copy,
vim_connection_info,
error=True)
else:
resource_changes = self._term_resource_update(
context,
vnf_info,
vnf_instance)
except Exception as e:
LOG.warning(traceback.format_exc())
LOG.warning("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
self._update_vnf_rollback_status_err(context, vnf_info)
except Exception as e:
LOG.warning("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
self._vnf_instance_update(context, vnf_instance)
except Exception as e:
LOG.warning("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
problem = objects.ProblemDetails(status=500,
detail=str(ex))
try:
timestamp = datetime.utcnow()
vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ']
vnf_lcm_op_occ.operation_state = 'FAILED_TEMP'
vnf_lcm_op_occ.state_entered_time = timestamp
if resource_changes:
vnf_lcm_op_occ.resource_changes = resource_changes
vnf_lcm_op_occ.error = problem
vnf_lcm_op_occ.save()
except Exception as e:
LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
notification = vnf_info['notification']
notification['notificationStatus'] = 'RESULT'
notification['operationState'] = 'FAILED_TEMP'
notification['error'] = problem.to_dict()
if resource_changes:
resource_dict = resource_changes.to_dict()
if resource_dict.get('affected_vnfcs'):
notification['affectedVnfcs'] = \
jsonutils.dump_as_bytes(
resource_dict.get('affected_vnfcs'))
if resource_dict.get('affected_virtual_links'):
notification['affectedVirtualLinks'] = \
jsonutils.dump_as_bytes(
resource_dict.get(
'affected_virtual_links'))
if resource_dict.get('affected_virtual_storages'):
notification['affectedVirtualStorages'] = \
jsonutils.dump_as_bytes(
resource_dict.get(
'affected_virtual_storages'))
self.rpc_api.send_notification(context, notification)
except Exception as e:
LOG.warning("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
return decorated_function
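# Illustrative sketch (not part of the Tacker driver): the three decorators
# above share one pattern -- wrap an LCM operation, and on failure resolve
# selected call arguments by name so the error handling can update the right
# objects before the exception is re-raised. A minimal, self-contained version
# of that pattern using only helpers already imported in this module; the
# names revert_on_error and arg_name are assumptions made for illustration.
def revert_on_error(arg_name):
    def decorator(function):
        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            try:
                return function(self, context, *args, **kwargs)
            except Exception:
                # Resolve the named argument the same way the decorators
                # above do, then let the exception propagate.
                wrapped_func = safe_utils.get_wrapped_function(function)
                keyed_args = inspect.getcallargs(
                    wrapped_func, self, context, *args, **kwargs)
                LOG.warning("Operation failed; reverting state for %s",
                            keyed_args.get(arg_name))
                raise
        return decorated_function
    return decorator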
def config_opts():
return [('tacker', VnfLcmDriver.OPTS)]
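# Illustrative note (not part of the original module): config_opts() exposes
# the VnfLcmDriver options under the 'tacker' group so that option-listing
# tooling (for example an oslo-config-generator entry point) can discover
# them. Once registered, they are read back as
# cfg.CONF.tacker.vnflcm_infra_driver and cfg.CONF.tacker.vnflcm_mgmt_driver,
# as VnfLcmDriver.__init__ below does.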
class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
OPTS = [
cfg.ListOpt(
'vnflcm_infra_driver', default=['openstack', 'kubernetes'],
help=_('Hosting vnf drivers tacker plugin will use')
),
cfg.ListOpt(
'vnflcm_mgmt_driver', default=[DEFAULT_VNFLCM_MGMT_DRIVER],
help=_('MGMT driver to communicate with '
'Hosting VNF/logical service '
'instance tacker plugin will use')
)
]
cfg.CONF.register_opts(OPTS, 'tacker')
def __init__(self):
super(VnfLcmDriver, self).__init__()
self.rpc_api = vnf_lcm_rpc.VNFLcmRPCAPI()
self._vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
self._vnf_manager = driver_manager.DriverManager(
'tacker.tacker.vnfm.drivers',
cfg.CONF.tacker.vnflcm_infra_driver)
self._mgmt_manager = driver_manager.DriverManager(
'tacker.tacker.mgmt.drivers', cfg.CONF.tacker.vnflcm_mgmt_driver)
self._mgmt_driver_hash = self._init_mgmt_driver_hash()
def _init_mgmt_driver_hash(self):
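        """Map each configured mgmt driver name to the SHA-256 hash of its
        source file.
        """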
driver_hash = {}
for mgmt_driver in cfg.CONF.tacker.vnflcm_mgmt_driver:
path = inspect.getfile(self._mgmt_manager[mgmt_driver].__class__)
driver_hash[mgmt_driver] = self._get_file_hash(path)
return driver_hash
def _vnf_instance_update(self, context, vnf_instance, **kwargs):
"""Update vnf instance in the database using kwargs as value."""
for k, v in kwargs.items():
setattr(vnf_instance, k, v)
vnf_instance.save()
def _instantiate_vnf(self, context, vnf_instance, vnf_dict,
vim_connection_info, instantiate_vnf_req):
vnfd_dict = vnflcm_utils._get_vnfd_dict(context, vnf_instance.vnfd_id,
instantiate_vnf_req.flavour_id)
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
param_for_subs_map = vnflcm_utils._get_param_data(vnfd_dict,
instantiate_vnf_req)
package_uuid = vnflcm_utils._get_vnf_package_id(context,
vnf_instance.vnfd_id)
vnf_software_images = vnflcm_utils._create_grant_request(vnfd_dict,
package_uuid)
vnf_resources = self._vnf_manager.invoke(
vim_connection_info.vim_type, 'pre_instantiation_vnf',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
vnf_software_images=vnf_software_images,
instantiate_vnf_req=instantiate_vnf_req,
vnf_package_path=vnf_package_path)
# save the vnf resources in the db
for _, resources in vnf_resources.items():
for vnf_resource in resources:
vnf_resource.create()
vnfd_dict_to_create_final_dict = copy.deepcopy(vnfd_dict)
final_vnf_dict = vnflcm_utils._make_final_vnf_dict(
vnfd_dict_to_create_final_dict, vnf_instance.id,
vnf_instance.vnf_instance_name, param_for_subs_map, vnf_dict)
final_vnf_dict['before_error_point'] = \
vnf_dict['before_error_point']
try:
instance_id = self._vnf_manager.invoke(
vim_connection_info.vim_type, 'instantiate_vnf',
context=context, plugin=self._vnfm_plugin,
vnf_instance=vnf_instance,
vnfd_dict=final_vnf_dict, grant_response=vnf_resources,
vim_connection_info=vim_connection_info,
vnf_package_path=vnf_package_path,
instantiate_vnf_req=instantiate_vnf_req)
except Exception as exp:
with excutils.save_and_reraise_exception():
exp.reraise = False
LOG.error("Unable to instantiate vnf instance "
"%(id)s due to error : %(error)s",
{"id": vnf_instance.id, "error":
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfInstantiationFailed(
id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
if vnf_instance.instantiated_vnf_info and\
vnf_instance.instantiated_vnf_info.instance_id != instance_id:
# TODO(h-asahina): rename instance_id to stack_id
vnf_instance.instantiated_vnf_info.instance_id = instance_id
if vnf_dict['attributes'].get('scaling_group_names'):
vnf_instance.instantiated_vnf_info.scale_status = \
vnf_dict['scale_status']
elif vnf_instance.instantiated_vnf_info:
default_scale_status = vnflcm_utils.\
get_default_scale_status(
context=context,
vnf_instance=vnf_instance,
vnfd_dict=vnfd_dict)
if default_scale_status is not None:
vnf_instance.instantiated_vnf_info.scale_status = \
default_scale_status
if vnf_dict['before_error_point'] <= EP.PRE_VIM_CONTROL:
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'create_wait',
plugin=self._vnfm_plugin, context=context,
vnf_dict=final_vnf_dict,
vnf_id=final_vnf_dict['instance_id'],
auth_attr=vim_connection_info.access_info)
except Exception as exp:
with excutils.save_and_reraise_exception():
exp.reraise = False
LOG.error("Vnf creation wait failed for vnf instance "
"%(id)s due to error : %(error)s",
{"id": vnf_instance.id, "error":
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfInstantiationWaitFailed(
id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
elif vnf_dict['before_error_point'] == EP.POST_VIM_CONTROL:
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'update_stack_wait',
plugin=self._vnfm_plugin, context=context,
vnf_dict=final_vnf_dict,
stack_id=instance_id,
auth_attr=vim_connection_info.access_info)
except Exception as exp:
with excutils.save_and_reraise_exception():
exp.reraise = False
LOG.error("Vnf update wait failed for vnf instance "
"%(id)s due to error : %(error)s",
{"id": vnf_instance.id, "error":
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfInstantiationWaitFailed(
id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
if vnf_instance.instantiated_vnf_info.instance_id:
self._vnf_manager.invoke(vim_connection_info.vim_type,
'post_vnf_instantiation', context=context,
vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
instantiate_vnf_req=instantiate_vnf_req)
def _get_file_hash(self, path):
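        """Return the SHA-256 hex digest of the file at the given path."""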
hash_obj = hashlib.sha256()
with open(path) as f:
hash_obj.update(f.read().encode('utf-8'))
return hash_obj.hexdigest()
def _check_mgmt_driver(self, artifact_mgmt_driver, artifacts_value,
vnf_package_path):
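        """Check that the mgmt driver named in the VNFD is configured and
        that its file hash matches the Tacker-side driver.
        """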
# check implementation and artifacts exist in cfg.CONF.tacker
if artifact_mgmt_driver not in self._mgmt_driver_hash:
LOG.error('The {} specified in the VNFD '
'is inconsistent with the MgmtDriver in '
'the configuration file.'.format(artifact_mgmt_driver))
raise exceptions.MgmtDriverInconsistent(
MgmtDriver=artifact_mgmt_driver)
# check file content
pkg_mgmt_driver_path = os.path.join(vnf_package_path,
artifacts_value[artifact_mgmt_driver]['file'])
pkg_mgmt_driver_hash = self._get_file_hash(pkg_mgmt_driver_path)
if pkg_mgmt_driver_hash == \
self._mgmt_driver_hash[artifact_mgmt_driver]:
return artifact_mgmt_driver
else:
LOG.error('The hash verification of VNF Package MgmtDriver '
'and Tacker MgmtDriver does not match.')
raise exceptions.MgmtDriverHashMatchFailure()
def _load_vnf_interface(self, context, method_name,
vnf_instance, vnfd_dict):
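        """Return the mgmt driver declared for the given Vnflcm interface
        method, falling back to the default mgmt driver.
        """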
VNF_value = vnfd_dict['topology_template']['node_templates']['VNF']
tacker_mgmt_driver = DEFAULT_VNFLCM_MGMT_DRIVER
interfaces_vnflcm_value = \
VNF_value.get('interfaces', {}).get('Vnflcm', {})
if not interfaces_vnflcm_value:
return tacker_mgmt_driver
artifacts_value = VNF_value.get('artifacts')
if not artifacts_value:
return tacker_mgmt_driver
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
if interfaces_vnflcm_value.get(method_name):
artifact_mgmt_driver = interfaces_vnflcm_value.get(
method_name).get('implementation')
if artifact_mgmt_driver:
tacker_mgmt_driver = self._check_mgmt_driver(
artifact_mgmt_driver, artifacts_value, vnf_package_path)
return tacker_mgmt_driver
@log.log
def instantiate_vnf(self, context, vnf_instance, vnf_dict,
instantiate_vnf_req):
vnf_dict['current_error_point'] = EP.VNF_CONFIG_START
vim_connection_info_list = vnflcm_utils.\
_get_vim_connection_info_from_vnf_req(vnf_instance,
instantiate_vnf_req)
self._vnf_instance_update(context, vnf_instance,
vim_connection_info=vim_connection_info_list)
vim_info = vnflcm_utils._get_vim(context,
instantiate_vnf_req.vim_connection_info)
if vim_info['tenant_id'] != vnf_instance.tenant_id:
            LOG.error('The target VNF %(id)s cannot be instantiated '
'from a VIM of a different tenant.',
{"id": vnf_instance.id})
raise exceptions.TenantMatchFailure(resource='VNF',
id=vnf_instance.id,
action='instantiate')
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id, instantiate_vnf_req.flavour_id)
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_START:
            # TODO(LiangLu): grant_request is planned to be passed as a
            # parameter here, but it is not yet passed from the conductor
            # to vnflcm_driver, so we temporarily pass None for
            # grant_request. This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'instantiate_start', vnf_instance, vnfd_dict),
'instantiate_start', context=context,
vnf_instance=vnf_instance,
instantiate_vnf_request=instantiate_vnf_req,
grant=vnf_dict.get('grant'), grant_request=None)
vnf_dict['current_error_point'] = EP.PRE_VIM_CONTROL
if vnf_dict['before_error_point'] <= EP.POST_VIM_CONTROL:
self._instantiate_vnf(context, vnf_instance, vnf_dict,
vim_connection_info, instantiate_vnf_req)
vnf_dict['current_error_point'] = EP.INTERNAL_PROCESSING
vnf_dict['current_error_point'] = EP.VNF_CONFIG_END
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_END:
            # TODO(LiangLu): grant_request is planned to be passed as a
            # parameter here, but it is not yet passed from the conductor
            # to vnflcm_driver, so we temporarily pass None for
            # grant_request. This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'instantiate_end', vnf_instance, vnfd_dict),
'instantiate_end', context=context,
vnf_instance=vnf_instance,
instantiate_vnf_request=instantiate_vnf_req,
grant=vnf_dict.get('grant'), grant_request=None)
@log.log
@revert_to_error_task_state
def terminate_vnf(self, context, vnf_instance, terminate_vnf_req,
vnf_dict):
vnf_dict['current_error_point'] = EP.VNF_CONFIG_START
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_START:
            # TODO(LiangLu): grant and grant_request are planned to be
            # passed as parameters here, but they are not yet passed from
            # the conductor to vnflcm_driver, so we temporarily pass None
            # for grant and grant_request. This part will be updated in
            # the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'terminate_start', vnf_instance, vnfd_dict),
'terminate_start', context=context,
vnf_instance=vnf_instance,
terminate_vnf_request=terminate_vnf_req,
grant=None, grant_request=None)
vnf_dict['current_error_point'] = EP.PRE_VIM_CONTROL
LOG.info("Terminating vnf %s", vnf_instance.id)
try:
if vnf_dict['before_error_point'] <= EP.POST_VIM_CONTROL:
self._delete_vnf_instance_resources(context, vnf_instance,
vim_connection_info,
terminate_vnf_req=terminate_vnf_req)
vnf_dict['current_error_point'] = EP.INTERNAL_PROCESSING
vnf_instance.instantiated_vnf_info.reinitialize()
self._vnf_instance_update(context, vnf_instance,
vim_connection_info=[], task_state=None)
LOG.info("Vnf terminated %s successfully", vnf_instance.id)
except Exception as exp:
with excutils.save_and_reraise_exception():
if vnf_dict['current_error_point'] == EP.PRE_VIM_CONTROL:
if hasattr(vnf_instance.instantiated_vnf_info,
'instance_id'):
if vnf_instance.instantiated_vnf_info.instance_id:
vnf_dict['current_error_point'] = \
EP.POST_VIM_CONTROL
LOG.error("Unable to terminate vnf '%s' instance. "
"Error: %s", vnf_instance.id,
encodeutils.exception_to_unicode(exp))
vnf_dict['current_error_point'] = EP.VNF_CONFIG_END
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_END:
            # TODO(LiangLu): grant and grant_request are planned to be
            # passed as parameters here, but they are not yet passed from
            # the conductor to vnflcm_driver, so we temporarily pass None
            # for grant and grant_request. This part will be updated in
            # the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'terminate_end', vnf_instance, vnfd_dict),
'terminate_end', context=context,
vnf_instance=vnf_instance,
terminate_vnf_request=terminate_vnf_req,
grant=None, grant_request=None)
def _delete_vnf_instance_resources(self, context, vnf_instance,
vim_connection_info, terminate_vnf_req=None,
update_instantiated_state=True):
if (vnf_instance.instantiated_vnf_info and
vnf_instance.instantiated_vnf_info.instance_id) or \
vim_connection_info.vim_type == 'kubernetes':
instance_id = vnf_instance.instantiated_vnf_info.instance_id \
if vnf_instance.instantiated_vnf_info else None
access_info = vim_connection_info.access_info
LOG.info("Deleting stack %(instance)s for vnf %(id)s ",
{"instance": instance_id, "id": vnf_instance.id})
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance, terminate_vnf_req=terminate_vnf_req)
if update_instantiated_state:
vnf_instance.instantiation_state = \
fields.VnfInstanceState.NOT_INSTANTIATED
vnf_instance.save()
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete_wait', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance)
vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
context, vnf_instance.id)
for vnf_resource in vnf_resources:
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete_vnf_instance_resource',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
vnf_resource=vnf_resource)
vnf_resource.destroy(context)
def _heal_vnf(self, context, vnf_instance, vim_connection_info,
heal_vnf_request, vnf):
inst_vnf_info = vnf_instance.instantiated_vnf_info
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'heal_vnf',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
heal_vnf_request=heal_vnf_request)
except Exception as exp:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False
LOG.error("Failed to heal vnf %(id)s in infra driver. "
"Error: %(error)s", {"id": vnf_instance.id, "error":
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfHealFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
vnf['current_error_point'] = EP.POST_VIM_CONTROL
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'heal_vnf_wait',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
heal_vnf_request=heal_vnf_request)
except Exception as exp:
LOG.error("Failed to update vnf %(id)s resources for instance "
"%(instance)s. Error: %(error)s",
{'id': vnf_instance.id, 'instance':
inst_vnf_info.instance_id, 'error':
encodeutils.exception_to_unicode(exp)})
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'post_heal_vnf',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
heal_vnf_request=heal_vnf_request)
self._vnf_instance_update(context, vnf_instance, task_state=None)
except Exception as exp:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False
LOG.error("Failed to store updated resources information for "
"instance %(instance)s for vnf %(id)s. "
"Error: %(error)s",
{'id': vnf_instance.id, 'instance':
inst_vnf_info.instance_id, 'error':
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfHealFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
def _respawn_vnf(self, context, vnf_instance, vnf_dict,
vim_connection_info, heal_vnf_request):
if vnf_dict['before_error_point'] != EP.POST_VIM_CONTROL:
try:
self._delete_vnf_instance_resources(context, vnf_instance,
vim_connection_info, update_instantiated_state=False)
except Exception as exc:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False
err_msg = ("Failed to delete vnf resources for "
"vnf instance %(id)s before respawning. "
"The vnf is in inconsistent state. "
"Error: %(error)s")
LOG.error(err_msg % {"id": vnf_instance.id,
"error": str(exc)})
raise exceptions.VnfHealFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exc))
# InstantiateVnfRequest is not stored in the db as it's mapped
# to InstantiatedVnfInfo version object. Convert InstantiatedVnfInfo
# version object to InstantiateVnfRequest so that vnf can be
# instantiated.
instantiate_vnf_request = objects.InstantiateVnfRequest.\
from_vnf_instance(vnf_instance)
vnf_instance.instantiated_vnf_info.reinitialize()
vnf_instance.task_state = fields.VnfInstanceTaskState.INSTANTIATING
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id, instantiate_vnf_request.flavour_id)
if vnf_dict.get('vnf_instance_after'):
vnf_instance.instantiated_vnf_info = \
vnf_dict.get('vnf_instance_after').instantiated_vnf_info
else:
vnflcm_utils._build_instantiated_vnf_info(
vnfd_dict, instantiate_vnf_request, vnf_instance,
vim_connection_info.vim_id)
try:
self._instantiate_vnf(context, vnf_instance, vnf_dict,
vim_connection_info, instantiate_vnf_request)
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'post_vnf_instantiation',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
instantiate_vnf_req=instantiate_vnf_request)
except Exception as exc:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False
err_msg = ("Failed to instantiate vnf instance "
"%(id)s after termination. The vnf is in "
"inconsistent state. Error: %(error)s")
LOG.error(err_msg % {"id": vnf_instance.id,
"error": str(exc)})
raise exceptions.VnfHealFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exc))
self._vnf_instance_update(context, vnf_instance,
instantiation_state=fields.VnfInstanceState.INSTANTIATED,
task_state=None)
@log.log
@revert_to_error_task_state
def heal_vnf(self, context, vnf_instance, vnf_dict, heal_vnf_request):
vnf_dict['current_error_point'] = EP.VNF_CONFIG_START
LOG.info("Request received for healing vnf '%s'", vnf_instance.id)
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_START:
            # TODO(LiangLu): grant_request is planned to be passed as a
            # parameter here, but it is not yet passed from the conductor
            # to vnflcm_driver, so we temporarily pass None for
            # grant_request. This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'heal_start', vnf_instance, vnfd_dict),
'heal_start', context=context,
vnf_instance=vnf_instance,
heal_vnf_request=heal_vnf_request,
grant=vnf_dict.get('grant'), grant_request=None)
vnf_dict['current_error_point'] = EP.PRE_VIM_CONTROL
try:
heal_flag = False
if vnf_dict['before_error_point'] <= EP.POST_VIM_CONTROL:
if not heal_vnf_request.vnfc_instance_id:
self._respawn_vnf(context, vnf_instance, vnf_dict,
vim_connection_info, heal_vnf_request)
else:
heal_flag = True
self._heal_vnf(context, vnf_instance, vim_connection_info,
heal_vnf_request, vnf_dict)
            LOG.info("Request for healing vnf '%s' completed "
                     "successfully", vnf_instance.id)
except Exception as exp:
with excutils.save_and_reraise_exception():
if vnf_dict['current_error_point'] == EP.PRE_VIM_CONTROL:
if not heal_flag:
if hasattr(vnf_instance.instantiated_vnf_info,
'instance_id'):
if vnf_instance.instantiated_vnf_info.instance_id:
vnf_dict['current_error_point'] = \
EP.POST_VIM_CONTROL
LOG.error("Unable to heal vnf '%s' instance. "
"Error: %s", heal_vnf_request.vnfc_instance_id,
encodeutils.exception_to_unicode(exp))
raise exceptions.VnfHealFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
vnf_dict['current_error_point'] = EP.VNF_CONFIG_END
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_END:
            # TODO(LiangLu): grant_request is planned to be passed as a
            # parameter here; however, it is not yet passed from the
            # conductor to vnflcm_driver, so grant_request is temporarily
            # set to None.
            # This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'heal_end', vnf_instance, vnfd_dict),
'heal_end', context=context,
vnf_instance=vnf_instance,
heal_vnf_request=heal_vnf_request,
grant=vnf_dict.get('grant'), grant_request=None)
def _scale_vnf_pre(self, context, vnf_info, vnf_instance,
scale_vnf_request, vim_connection_info):
if vnf_info['before_error_point'] <= EP.NOTIFY_PROCESSING:
self._vnfm_plugin._update_vnf_scaling(
context, vnf_info, 'ACTIVE', 'PENDING_'
+ scale_vnf_request.type)
vnf_info['current_error_point'] = EP.VNF_CONFIG_START
scale_name_list = []
grp_id = None
vnf_info['policy_name'] = scale_vnf_request.aspect_id
if scale_vnf_request.type == 'SCALE_IN':
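            # For scale-in, resolve the concrete resource ids and names that
            # will be removed so they can be handed to the scale_start mgmt
            # call below.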
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
vnf_info['action'] = 'in'
scale_id_list, scale_name_list, grp_id, res_num = \
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'get_scale_in_ids',
plugin=self,
context=context,
vnf_dict=vnf_info,
                    is_reverse=scale_vnf_request.additional_params.get(
                        'is_reverse'),
                    auth_attr=vim_connection_info.access_info,
                    region_name=vim_connection_info.access_info.get(
                        'region_name'),
number_of_steps=scale_vnf_request.number_of_steps
)
vnf_info['res_num'] = res_num
if vnf_info['before_error_point'] <= EP.VNF_CONFIG_START:
                # TODO(LiangLu): grant_request is planned to be passed as a
                # parameter here; however, it is not yet passed from the
                # conductor to vnflcm_driver, so grant_request is
                # temporarily set to None.
                # This part will be updated in the next release.
if len(scale_id_list) != 0 or \
vim_connection_info.vim_type == 'kubernetes':
kwargs = {'scale_name_list': scale_name_list}
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'scale_start', vnf_instance, vnfd_dict),
'scale_start', context=context,
vnf_instance=vnf_instance,
scale_vnf_request=scale_vnf_request,
grant=vnf_info.get('grant'), grant_request=None,
**kwargs)
elif scale_vnf_request.type == 'SCALE_OUT':
vnf_info['action'] = 'out'
scale_id_list = self._vnf_manager.invoke(
vim_connection_info.vim_type,
'get_scale_ids',
plugin=self,
context=context,
vnf_dict=vnf_info,
auth_attr=vim_connection_info.access_info,
region_name=vim_connection_info.access_info.get('region_name')
)
else:
            msg = 'Unknown scale type: %s' % scale_vnf_request.type
raise exceptions.VnfScaleFailed(id=vnf_info['instance_id'],
error=msg)
vnf_info['current_error_point'] = EP.PRE_VIM_CONTROL
return scale_id_list, scale_name_list, grp_id
def _get_node_template_for_vnf(self, vnfd_dict):
node_tmp = vnfd_dict['topology_template']['node_templates']
for node_template in node_tmp.values():
LOG.debug("node_template %s", node_template)
if not re.match('^tosca', node_template['type']):
LOG.debug("VNF node_template %s", node_template)
return node_template
return {}
def _scale_vnf_post(self, context, vnf_info, vnf_instance,
scale_vnf_request, vim_connection_info,
scale_id_list,
resource_changes):
vnf_info['current_error_point'] = EP.VNF_CONFIG_END
if scale_vnf_request.type == 'SCALE_OUT':
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
scale_id_after = self._vnf_manager.invoke(
vim_connection_info.vim_type,
'get_scale_ids',
plugin=self,
context=context,
vnf_dict=vnf_info,
auth_attr=vim_connection_info.access_info,
region_name=vim_connection_info.access_info.get('region_name')
)
id_list = list(set(scale_id_after) - set(scale_id_list))
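            # id_list now holds only the resources created by this scale-out
            # (ids present after the operation but not before it).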
vnf_info['res_num'] = len(scale_id_after)
if vnf_info['before_error_point'] <= EP.VNF_CONFIG_END:
                # TODO(LiangLu): grant_request is planned to be passed as a
                # parameter here; however, it is not yet passed from the
                # conductor to vnflcm_driver, so grant_request is
                # temporarily set to None.
                # This part will be updated in the next release.
if len(id_list) != 0 or \
vim_connection_info.vim_type == 'kubernetes':
kwargs = {'scale_out_id_list': id_list}
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'scale_end', vnf_instance, vnfd_dict),
'scale_end', context=context,
vnf_instance=vnf_instance,
scale_vnf_request=scale_vnf_request,
grant=vnf_info.get('grant'), grant_request=None,
**kwargs)
vnf_instance.instantiated_vnf_info.scale_level =\
vnf_info['after_scale_level']
if vim_connection_info.vim_type != 'kubernetes':
            # NOTE(ueha): The scale logic for the OpenStack VIM is heavily
            # hard-coded around `vnf_info`. This dependency is to be
            # refactored in the future.
scaleGroupDict = \
jsonutils.loads(vnf_info['attributes']['scale_group'])
(scaleGroupDict
['scaleGroupDict'][scale_vnf_request.aspect_id]['default']) =\
vnf_info['res_num']
vnf_info['attributes']['scale_group'] =\
jsonutils.dump_as_bytes(scaleGroupDict)
if vnf_info['before_error_point'] < EP.NOTIFY_COMPLETED:
self._vnfm_plugin._update_vnf_scaling(context, vnf_info,
'PENDING_' + scale_vnf_request.type, 'ACTIVE')
vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ']
vnf_lcm_op_occ.operation_state = 'COMPLETED'
vnf_lcm_op_occ.resource_changes = resource_changes
vnf_lcm_op_occ.state_entered_time = timeutils.utcnow()
vnf_lcm_op_occ.save()
vnf_instance.task_state = None
vnf_instance.save()
vnf_info['current_error_point'] = EP.NOTIFY_COMPLETED
notification = vnf_info['notification']
notification['notificationStatus'] = 'RESULT'
notification['operationState'] = 'COMPLETED'
resource_dict = resource_changes.to_dict()
if resource_dict.get('affected_vnfcs'):
notification['affectedVnfcs'] = resource_dict.get('affected_vnfcs')
if resource_dict.get('affected_virtual_links'):
notification['affectedVirtualLinks'] =\
resource_dict.get('affected_virtual_links')
if resource_dict.get('affected_virtual_storages'):
notification['affectedVirtualStorages'] =\
resource_dict.get('affected_virtual_storages')
self.rpc_api.send_notification(context, notification)
def _scale_resource_update(self, context, vnf_info, vnf_instance,
scale_vnf_request,
vim_connection_info,
error=False):
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
instantiated_vnf_before = \
copy.deepcopy(vnf_instance.instantiated_vnf_info)
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_resource_update',
context=context,
vnf_instance=vnf_instance,
vnf_info=vnf_info,
scale_vnf_request=scale_vnf_request,
vim_connection_info=vim_connection_info
)
for scale in vnf_instance.instantiated_vnf_info.scale_status:
if scale_vnf_request.aspect_id == scale.aspect_id:
if not error:
scale.scale_level = vnf_info['after_scale_level']
break
else:
scale.scale_level = vnf_info['scale_level']
break
LOG.debug("vnf_instance.instantiated_vnf_info %s",
vnf_instance.instantiated_vnf_info)
affected_vnfcs = []
affected_virtual_storages = []
affected_virtual_links = []
if scale_vnf_request.type == 'SCALE_IN':
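            # Compare the resource info captured before the VIM update with
            # the refreshed info to report what the scale-in removed (vnfcs,
            # storages, and virtual link ports).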
for vnfc in instantiated_vnf_before.vnfc_resource_info:
vnfc_delete = True
for rsc in vnf_instance.instantiated_vnf_info.\
vnfc_resource_info:
if vnfc.compute_resource.resource_id == \
rsc.compute_resource.resource_id:
vnfc_delete = False
break
if vnfc_delete:
affected_vnfc = objects.AffectedVnfc(id=vnfc.id,
vdu_id=vnfc.vdu_id,
change_type='REMOVED',
compute_resource=vnfc.compute_resource)
affected_vnfcs.append(affected_vnfc)
for st in instantiated_vnf_before.virtual_storage_resource_info:
st_delete = True
for rsc in vnf_instance.instantiated_vnf_info.\
virtual_storage_resource_info:
if st.storage_resource.resource_id == \
rsc.storage_resource.resource_id:
st_delete = False
break
if st_delete:
affected_st = objects.AffectedVirtualStorage(
id=st.id,
virtual_storage_desc_id=st.virtual_storage_desc_id,
change_type='REMOVED',
storage_resource=st.storage_resource)
affected_virtual_storages.append(affected_st)
for vl in instantiated_vnf_before.vnf_virtual_link_resource_info:
port_delete = False
for rsc in vnf_instance.\
instantiated_vnf_info.vnf_virtual_link_resource_info:
if vl.network_resource.resource_id == \
rsc.network_resource.resource_id:
if len(vl.vnf_link_ports) != len(rsc.vnf_link_ports):
port_delete = True
break
if port_delete:
affected_vl = objects.AffectedVirtualLink(
id=vl.id,
vnf_virtual_link_desc_id=vl.vnf_virtual_link_desc_id,
change_type='LINK_PORT_REMOVED',
network_resource=vl.network_resource)
affected_virtual_links.append(affected_vl)
else:
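            # For scale-out, the same comparison in the opposite direction
            # reports the vnfcs, storages, and link ports that were added.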
for rsc in vnf_instance.instantiated_vnf_info.vnfc_resource_info:
vnfc_add = True
for vnfc in instantiated_vnf_before.vnfc_resource_info:
if vnfc.compute_resource.resource_id == \
rsc.compute_resource.resource_id:
vnfc_add = False
break
if vnfc_add:
affected_vnfc = objects.AffectedVnfc(
id=rsc.id,
vdu_id=rsc.vdu_id,
change_type='ADDED',
compute_resource=rsc.compute_resource)
affected_vnfcs.append(affected_vnfc)
for rsc in vnf_instance.instantiated_vnf_info.\
virtual_storage_resource_info:
st_add = True
for st in instantiated_vnf_before.\
virtual_storage_resource_info:
if st.storage_resource.resource_id == \
rsc.storage_resource.resource_id:
st_add = False
break
if st_add:
affected_st = objects.AffectedVirtualStorage(
id=rsc.id,
virtual_storage_desc_id=rsc.virtual_storage_desc_id,
change_type='ADDED',
storage_resource=rsc.storage_resource)
affected_virtual_storages.append(affected_st)
for vl in instantiated_vnf_before.vnf_virtual_link_resource_info:
port_add = False
for rsc in vnf_instance.instantiated_vnf_info.\
vnf_virtual_link_resource_info:
if vl.network_resource.resource_id == \
rsc.network_resource.resource_id:
if len(vl.vnf_link_ports) != len(rsc.vnf_link_ports):
port_add = True
break
if port_add:
affected_vl = objects.AffectedVirtualLink(
id=vl.id,
vnf_virtual_link_desc_id=vl.vnf_virtual_link_desc_id,
change_type='LINK_PORT_ADDED',
network_resource=vl.network_resource)
affected_virtual_links.append(affected_vl)
resource_changes = objects.ResourceChanges()
resource_changes.affected_vnfcs = []
resource_changes.affected_virtual_links = []
resource_changes.affected_virtual_storages = []
if 'resource_changes' in \
vnf_lcm_op_occs and vnf_lcm_op_occs.resource_changes:
res_chg = vnf_lcm_op_occs.resource_changes
if 'affected_vnfcs' in res_chg:
if res_chg.affected_vnfcs and \
len(res_chg.affected_vnfcs) > 0:
resource_changes.affected_vnfcs.\
extend(res_chg.affected_vnfcs)
if 'affected_virtual_storages' in res_chg:
if res_chg.affected_virtual_storages and \
len(res_chg.affected_virtual_storages) > 0:
resource_changes.affected_virtual_storages.extend(
res_chg.affected_virtual_storages)
if 'affected_virtual_links' in res_chg:
if res_chg.affected_virtual_links and \
len(res_chg.affected_virtual_links) > 0:
resource_changes.affected_virtual_links.\
extend(res_chg.affected_virtual_links)
resource_changes.affected_vnfcs.extend(affected_vnfcs)
resource_changes.affected_virtual_storages.extend(
affected_virtual_storages)
resource_changes.affected_virtual_links = []
resource_changes.affected_virtual_links.extend(affected_virtual_links)
vnf_info['resource_changes'] = resource_changes
return resource_changes
@log.log
@revert_to_error_scale
def scale_vnf(self, context, vnf_info, vnf_instance, scale_vnf_request):
        LOG.info("Request received for scaling vnf '%s'", vnf_instance.id)
timestamp = datetime.utcnow()
vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ']
vnf_lcm_op_occ.operation_state = 'PROCESSING'
vnf_lcm_op_occ.state_entered_time = timestamp
LOG.debug("vnf_lcm_op_occ %s", vnf_lcm_op_occ)
vnf_lcm_op_occ.save()
notification = vnf_info['notification']
notification['operationState'] = 'PROCESSING'
self.rpc_api.send_notification(context, notification)
vnf_info['current_error_point'] = EP.NOTIFY_PROCESSING
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
scale_id_list, scale_name_list, grp_id = self._scale_vnf_pre(
context, vnf_info,
vnf_instance,
scale_vnf_request,
vim_connection_info)
if vnf_info['before_error_point'] <= EP.POST_VIM_CONTROL:
self._scale_vnf(context, vnf_info, vnf_instance, scale_vnf_request,
vim_connection_info, scale_name_list, grp_id,
vnf_lcm_op_occ)
resource_changes = self._scale_resource_update(context, vnf_info,
vnf_instance,
scale_vnf_request,
vim_connection_info)
vnf_info['current_error_point'] = EP.INTERNAL_PROCESSING
self._scale_vnf_post(context, vnf_info,
vnf_instance,
scale_vnf_request,
vim_connection_info,
scale_id_list,
resource_changes)
        LOG.info("Request for scaling vnf '%s' completed "
                 "successfully", vnf_instance.id)
def _scale_vnf(self, context, vnf_info, vnf_instance, scale_vnf_request,
vim_connection_info, scale_name_list, grp_id,
vnf_lcm_op_occ):
# action_driver
LOG.debug("vnf_info['vnfd']['attributes'] %s", (vnf_info
.get('vnfd', {})
.get('attributes')))
self._vnf_manager = driver_manager.DriverManager(
'tacker.tacker.vnfm.drivers',
cfg.CONF.tacker.infra_driver)
if scale_vnf_request.type == 'SCALE_IN':
action = 'in'
elif scale_vnf_request.type == 'SCALE_OUT':
action = 'out'
else:
msg = 'Unknown scale type: %s' % scale_vnf_request.type
raise exceptions.VnfScaleFailed(id=vnf_instance.id, error=msg)
stack_id = vnf_instance.instantiated_vnf_info.instance_id
        # TODO(h-asahina): rename the `instance_id` key to `stack_id`
policy = {'instance_id': stack_id,
'name': scale_vnf_request.aspect_id,
'vnf': vnf_info,
'action': action}
LOG.debug(
"is_reverse: %s",
scale_vnf_request.additional_params.get('is_reverse'))
default = None
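        # For the OpenStack VIM, `default` is read from the stored scale
        # group and holds the aspect's current default capacity; a value of
        # 0 routes the request through the 'scale_out_initial' call below.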
if vim_connection_info.vim_type == 'kubernetes':
policy['vnf_instance_id'] = vnf_lcm_op_occ.get('vnf_instance_id')
vnf_instance = objects.VnfInstance.get_by_id(context,
policy['vnf_instance_id'])
vnfd_dict = vnflcm_utils._get_vnfd_dict(context,
vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
tosca = tosca_template.ToscaTemplate(
parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
policy['vdu_defs'] = vnflcm_utils.get_target_vdu_def_dict(
extract_policy_infos=extract_policy_infos,
aspect_id=scale_vnf_request.aspect_id,
tosca=tosca)
policy['delta_num'] = vnflcm_utils.get_scale_delta_num(
extract_policy_infos=extract_policy_infos,
aspect_id=scale_vnf_request.aspect_id)
elif vim_connection_info.vim_type == 'openstack':
            # NOTE(ueha): The scale logic for the OpenStack VIM is heavily
            # hard-coded around `vnf_info`. This dependency is to be
            # refactored in the future.
scale_json = vnf_info['attributes']['scale_group']
scale_group_dict = jsonutils.loads(scale_json)
key_aspect = scale_vnf_request.aspect_id
default = scale_group_dict['scaleGroupDict'][key_aspect]['default']
else:
msg = 'Unknown vim type: %s' % vim_connection_info.vim_type
raise exceptions.VnfScaleFailed(id=vnf_instance.id, error=msg)
if (scale_vnf_request.type == 'SCALE_IN' and
scale_vnf_request.additional_params['is_reverse'] == 'True'):
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_in_reverse',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
scale_vnf_request=scale_vnf_request,
region_name=vim_connection_info.access_info.get('region_name'),
scale_name_list=scale_name_list,
grp_id=grp_id
)
vnf_info['current_error_point'] = EP.POST_VIM_CONTROL
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_update_wait',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
region_name=vim_connection_info.access_info.get('region_name')
)
elif scale_vnf_request.type == 'SCALE_OUT' and default == 0:
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_out_initial',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
scale_vnf_request=scale_vnf_request,
region_name=vim_connection_info.access_info.get('region_name')
)
vnf_info['current_error_point'] = EP.POST_VIM_CONTROL
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_update_wait',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
region_name=vim_connection_info.access_info.get('region_name')
)
else:
for _ in range(scale_vnf_request.number_of_steps):
last_event_id = self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
policy=policy,
                    region_name=vim_connection_info.access_info.get(
                        'region_name')
)
vnf_info['current_error_point'] = EP.POST_VIM_CONTROL
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_wait',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
policy=policy,
                    region_name=vim_connection_info.access_info.get(
                        'region_name'),
last_event_id=last_event_id)
def _term_resource_update(self, context, vnf_info, vnf_instance,
error=False):
if not vnf_instance.instantiated_vnf_info:
resource_changes = objects.ResourceChanges()
resource_changes.affected_vnfcs = []
resource_changes.affected_virtual_links = []
resource_changes.affected_virtual_storages = []
vnf_info['resource_changes'] = resource_changes
return resource_changes
instantiated_vnf_before = copy.deepcopy(
vnf_instance.instantiated_vnf_info)
vnf_instance.instantiated_vnf_info.reinitialize()
if not error:
vnf_instance.vim_connection_info = []
vnf_instance.task_state = None
LOG.debug(
"vnf_instance.instantiated_vnf_info %s",
vnf_instance.instantiated_vnf_info)
affected_vnfcs = []
affected_virtual_storages = []
affected_virtual_links = []
for vnfc in instantiated_vnf_before.vnfc_resource_info:
vnfc_delete = True
for rsc in vnf_instance.instantiated_vnf_info.vnfc_resource_info:
if vnfc.compute_resource.resource_id == \
rsc.compute_resource.resource_id:
vnfc_delete = False
break
if vnfc_delete:
affected_vnfc = objects.AffectedVnfc(
id=vnfc.id,
vdu_id=vnfc.vdu_id,
change_type='REMOVED',
compute_resource=vnfc.compute_resource)
affected_vnfcs.append(affected_vnfc)
for st in instantiated_vnf_before.virtual_storage_resource_info:
st_delete = True
for rsc in \
vnf_instance.instantiated_vnf_info.\
virtual_storage_resource_info:
if st.storage_resource.resource_id == \
rsc.storage_resource.resource_id:
st_delete = False
break
if st_delete:
affected_st = objects.AffectedVirtualStorage(
id=st.id,
virtual_storage_desc_id=st.virtual_storage_desc_id,
change_type='REMOVED',
storage_resource=st.storage_resource)
affected_virtual_storages.append(affected_st)
for vl in instantiated_vnf_before.vnf_virtual_link_resource_info:
            vl_delete = True
            for rsc in \
                    vnf_instance.instantiated_vnf_info.\
                    vnf_virtual_link_resource_info:
                if vl.network_resource.resource_id == \
                        rsc.network_resource.resource_id:
                    vl_delete = False
                    break
            if vl_delete:
affected_vl = objects.AffectedVirtualLink(
id=vl.id,
vnf_virtual_link_desc_id=vl.vnf_virtual_link_desc_id,
change_type='REMOVED',
network_resource=vl.network_resource)
affected_virtual_links.append(affected_vl)
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
resource_changes = objects.ResourceChanges()
resource_changes.affected_vnfcs = []
resource_changes.affected_virtual_links = []
resource_changes.affected_virtual_storages = []
if 'resource_changes' in vnf_lcm_op_occs and \
vnf_lcm_op_occs.resource_changes:
res_chg = vnf_lcm_op_occs.resource_changes
if 'affected_vnfcs' in res_chg:
if res_chg.affected_vnfcs and \
len(res_chg.affected_vnfcs) > 0:
resource_changes.affected_vnfcs.extend(
res_chg.affected_vnfcs)
if 'affected_virtual_storages' in res_chg:
if res_chg.affected_virtual_storages and \
len(res_chg.affected_virtual_storages) > 0:
resource_changes.affected_virtual_storages.extend(
res_chg.affected_virtual_storages)
if 'affected_virtual_links' in res_chg:
if res_chg.affected_virtual_links and \
len(res_chg.affected_virtual_links) > 0:
resource_changes.affected_virtual_links.extend(
res_chg.affected_virtual_links)
resource_changes.affected_vnfcs.extend(affected_vnfcs)
resource_changes.affected_virtual_storages.extend(
affected_virtual_storages)
resource_changes.affected_virtual_links.extend(affected_virtual_links)
vnf_info['resource_changes'] = resource_changes
return resource_changes
def _rollback_vnf_pre(
self,
context,
vnf_info,
vnf_instance,
operation_params,
vim_connection_info):
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
scale_id_list = []
scale_name_list = []
grp_id = None
self._update_vnf_rollback_pre(context, vnf_info)
if vnf_lcm_op_occs.operation == 'SCALE':
if vim_connection_info.vim_type != 'kubernetes':
                # NOTE(ueha): The scale logic for the OpenStack VIM is
                # heavily hard-coded around `vnf_info`. This dependency is
                # to be refactored in the future.
scaleGroupDict = jsonutils.loads(
vnf_info['attributes']['scale_group'])
cap_size = scaleGroupDict['scaleGroupDict'][operation_params
['aspect_id']]['default']
vnf_info['res_num'] = cap_size
scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
for scale in vnf_instance.instantiated_vnf_info.scale_status:
if scale_vnf_request.aspect_id == scale.aspect_id:
vnf_info['after_scale_level'] = scale.scale_level
break
if vnf_lcm_op_occs.operation == 'SCALE' \
and vnf_lcm_op_occs.error_point >= EP.POST_VIM_CONTROL:
scale_id_list, scale_name_list, grp_id = self._vnf_manager.invoke(
vim_connection_info.vim_type,
'get_rollback_ids',
plugin=self,
context=context,
vnf_dict=vnf_info,
aspect_id=operation_params['aspect_id'],
auth_attr=vim_connection_info.access_info,
region_name=vim_connection_info.access_info.get('region_name')
)
if vnf_lcm_op_occs.error_point == EP.NOTIFY_COMPLETED:
if vnf_lcm_op_occs.operation == 'SCALE':
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
vnf_info['action'] = 'in'
if len(scale_id_list) != 0:
kwargs = {'scale_name_list': scale_name_list}
                    # TODO(LiangLu): grant_request is planned to be passed
                    # as a parameter here; however, it is not yet passed
                    # from the conductor to vnflcm_driver, so grant_request
                    # is temporarily set to None.
                    # This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(context, 'scale_start',
vnf_instance, vnfd_dict),
'scale_start', context=context,
vnf_instance=vnf_instance,
scale_vnf_request=scale_vnf_request,
grant=vnf_info.get('grant'), grant_request=None,
**kwargs)
else:
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
                # TODO(LiangLu): grant and grant_request are planned to be
                # passed as parameters here; however, they are not yet
                # passed from the conductor to vnflcm_driver, so both are
                # temporarily set to None.
                # This part will be updated in the next release.
if len(scale_id_list) != 0:
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'terminate_start',
vnf_instance, vnfd_dict),
'terminate_start', context=context,
vnf_instance=vnf_instance,
terminate_vnf_request=None,
grant=None, grant_request=None)
vnf_lcm_op_occs.error_point = EP.VNF_CONFIG_END
return scale_name_list, grp_id
def _rollback_vnf(
self,
context,
vnf_info,
vnf_instance,
operation_params,
vim_connection_info,
scale_name_list,
grp_id):
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
if vnf_lcm_op_occs.error_point >= EP.POST_VIM_CONTROL:
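            # VIM resources were already modified: a failed SCALE is undone
            # with a reverse scale-in, any other operation by deleting the
            # stack that was created.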
if vnf_lcm_op_occs.operation == 'SCALE':
scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_in_reverse',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
scale_vnf_request=scale_vnf_request,
region_name=vim_connection_info.access_info.get(
'region_name'),
scale_name_list=scale_name_list,
grp_id=grp_id)
self._vnf_manager.invoke(
vim_connection_info.vim_type,
'scale_update_wait',
plugin=self,
context=context,
auth_attr=vim_connection_info.access_info,
vnf_info=vnf_info,
region_name=vim_connection_info.access_info.get(
'region_name'))
else:
instance_id = vnf_instance.instantiated_vnf_info.instance_id
access_info = vim_connection_info.access_info
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance)
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete_wait', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance)
vnf_lcm_op_occs.error_point = EP.PRE_VIM_CONTROL
def _update_vnf_rollback_pre(self, context, vnf_info):
self._vnfm_plugin._update_vnf_rollback_pre(context, vnf_info)
def _update_vnf_rollback(self, context, vnf_info,
vnf_instance, vnf_lcm_op_occs):
if vnf_lcm_op_occs.operation == 'SCALE':
status = 'ACTIVE'
else:
status = 'INACTIVE'
vnf_instance.task_state = None
self._vnfm_plugin._update_vnf_rollback(context, vnf_info, 'ERROR',
status)
vnf_lcm_op_occs.state_entered_time = timeutils.utcnow()
vnf_lcm_op_occs.save()
vnf_instance.save()
def _update_vnf_rollback_status_err(self, context, vnf_info):
self._vnfm_plugin.update_vnf_rollback_status_err(context, vnf_info)
def _rollback_vnf_post(
self,
context,
vnf_info,
vnf_instance,
operation_params,
vim_connection_info):
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
if vnf_lcm_op_occs.operation == 'SCALE':
scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
scale_vnf_request_copy = copy.deepcopy(scale_vnf_request)
scale_vnf_request_copy.type = 'SCALE_IN'
resource_changes = self._scale_resource_update(context, vnf_info,
vnf_instance,
scale_vnf_request_copy,
vim_connection_info)
else:
            vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
            # TODO(LiangLu): grant and grant_request are planned to be
            # passed as parameters here; however, they are not yet passed
            # from the conductor to vnflcm_driver, so both are temporarily
            # set to None.
            # This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'terminate_end',
vnf_instance, vnfd_dict),
'terminate_end', context=context,
vnf_instance=vnf_instance,
terminate_vnf_request=None,
grant=None, grant_request=None)
resource_changes = self._term_resource_update(
context, vnf_info, vnf_instance)
vnf_lcm_op_occs.error_point = EP.VNF_CONFIG_START
timestamp = datetime.utcnow()
vnf_lcm_op_occs.operation_state = 'ROLLED_BACK'
vnf_lcm_op_occs.state_entered_time = timestamp
vnf_lcm_op_occs.resource_changes = resource_changes
self._update_vnf_rollback(context, vnf_info,
vnf_instance,
vnf_lcm_op_occs)
notification = vnf_info['notification']
notification['notificationStatus'] = 'RESULT'
notification['operationState'] = 'ROLLED_BACK'
resource_dict = resource_changes.to_dict()
if resource_dict.get('affected_vnfcs'):
notification['affectedVnfcs'] = resource_dict.get('affected_vnfcs')
if resource_dict.get('affected_virtual_links'):
notification['affectedVirtualLinks'] = \
resource_dict.get('affected_virtual_links')
if resource_dict.get('affected_virtual_storages'):
notification['affectedVirtualStorages'] = \
resource_dict.get('affected_virtual_storages')
self.rpc_api.send_notification(context, notification)
@log.log
@revert_to_error_rollback
def rollback_vnf(self, context, vnf_info, vnf_instance, operation_params):
        LOG.info("Request received for rolling back vnf '%s'", vnf_instance.id)
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
if vnf_lcm_op_occs.operation == 'SCALE':
scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
for scale in vnf_instance.instantiated_vnf_info.scale_status:
if scale_vnf_request.aspect_id == scale.aspect_id:
vnf_info['after_scale_level'] = scale.scale_level
break
timestamp = datetime.utcnow()
vnf_lcm_op_occs.operation_state = 'ROLLING_BACK'
vnf_lcm_op_occs.state_entered_time = timestamp
LOG.debug("vnf_lcm_op_occs %s", vnf_lcm_op_occs)
insta_url = CONF.vnf_lcm.endpoint_url + \
"/vnflcm/v1/vnf_instances/" + \
vnf_instance.id
vnflcm_url = CONF.vnf_lcm.endpoint_url + \
"/vnflcm/v1/vnf_lcm_op_occs/" + \
vnf_lcm_op_occs.id
notification = {}
notification['notificationType'] = \
'VnfLcmOperationOccurrenceNotification'
notification['vnfInstanceId'] = vnf_instance.id
notification['notificationStatus'] = 'START'
notification['operation'] = vnf_lcm_op_occs.operation
notification['operationState'] = 'ROLLING_BACK'
if vnf_lcm_op_occs.operation == 'SCALE':
notification['isAutomaticInvocation'] = \
vnf_lcm_op_occs.is_automatic_invocation
else:
notification['isAutomaticInvocation'] = False
notification['vnfLcmOpOccId'] = vnf_lcm_op_occs.id
notification['_links'] = {}
notification['_links']['vnfInstance'] = {}
notification['_links']['vnfInstance']['href'] = insta_url
notification['_links']['vnfLcmOpOcc'] = {}
notification['_links']['vnfLcmOpOcc']['href'] = vnflcm_url
vnf_info['notification'] = notification
vnf_lcm_op_occs.save()
self.rpc_api.send_notification(context, notification)
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
scale_name_list, grp_id = self._rollback_vnf_pre(
context, vnf_info, vnf_instance,
operation_params, vim_connection_info)
self._rollback_vnf(
context,
vnf_info,
vnf_instance,
operation_params,
vim_connection_info,
scale_name_list,
grp_id)
self._rollback_vnf_post(
context,
vnf_info,
vnf_instance,
operation_params,
vim_connection_info)
def _change_ext_conn_vnf(self, context, vnf_instance, vnf_dict,
vim_connection_info, change_ext_conn_req):
inst_vnf_info = vnf_instance.instantiated_vnf_info
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'change_ext_conn_vnf',
context=context, vnf_instance=vnf_instance, vnf_dict=vnf_dict,
vim_connection_info=vim_connection_info,
change_ext_conn_req=change_ext_conn_req)
except Exception as exp:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False
LOG.error("Failed to change external connectivity "
"vnf %(id)s in infra driver. "
"Error: %(error)s", {"id": vnf_instance.id, "error":
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfChangeExtConnFailed(id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
vnf_dict['current_error_point'] = fields.ErrorPoint.POST_VIM_CONTROL
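        # The VIM request has been issued; wait for the update to settle
        # before reporting success.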
try:
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'change_ext_conn_vnf_wait',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info)
except Exception as exp:
LOG.error("Failed to update vnf %(id)s resources for instance "
"%(instance)s. Error: %(error)s",
{'id': vnf_instance.id, 'instance':
inst_vnf_info.instance_id, 'error':
encodeutils.exception_to_unicode(exp)})
raise exceptions.VnfChangeExtConnWaitFailed(
id=vnf_instance.id,
error=encodeutils.exception_to_unicode(exp))
@log.log
@revert_to_error_task_state
def change_ext_conn_vnf(
self,
context,
vnf_instance,
vnf_dict,
change_ext_conn_req):
LOG.info("Request received for changing external connectivity "
"vnf '%s'", vnf_instance.id)
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
vnf_dict['current_error_point'] = EP.VNF_CONFIG_START
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_START:
            # TODO(esto-aln): grant_request is planned to be passed as a
            # parameter here; however, it is not yet passed from the
            # conductor to vnflcm_driver, so grant_request is temporarily
            # set to None.
            # This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'change_external_connectivity_start',
vnf_instance, vnfd_dict),
'change_external_connectivity_start', context=context,
vnf_instance=vnf_instance,
change_ext_conn_request=change_ext_conn_req,
grant=vnf_dict.get('grant'), grant_request=None)
vnf_dict['current_error_point'] = EP.PRE_VIM_CONTROL
if vnf_dict['before_error_point'] <= EP.POST_VIM_CONTROL:
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = \
objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
self._change_ext_conn_vnf(context, vnf_instance, vnf_dict,
vim_connection_info, change_ext_conn_req)
# Since there is no processing corresponding to
# EP.INTERNAL_PROCESSING, it transitions to EP.VNF_CONFIG_END.
vnf_dict['current_error_point'] = EP.VNF_CONFIG_END
if vnf_dict['before_error_point'] <= EP.VNF_CONFIG_END:
            # TODO(esto-aln): grant_request is planned to be passed as a
            # parameter here; however, it is not yet passed from the
            # conductor to vnflcm_driver, so grant_request is temporarily
            # set to None.
            # This part will be updated in the next release.
self._mgmt_manager.invoke(
self._load_vnf_interface(
context, 'change_external_connectivity_end',
vnf_instance, vnfd_dict),
'change_external_connectivity_end', context=context,
vnf_instance=vnf_instance,
change_ext_conn_request=change_ext_conn_req,
grant=vnf_dict.get('grant'), grant_request=None)
        LOG.info("Request for changing external connectivity of "
                 "vnf '%s' completed successfully", vnf_instance.id)
from __future__ import absolute_import, print_function, division
import unittest
from datetime import date
from decimal import Decimal
from pony.orm.core import *
from pony.orm.sqltranslation import IncomparableTypesError
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
db = Database()
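# Test schema: Department 1-n Group 1-n Student, Department 1-n Course, and
# a many-to-many Student <-> Course relation.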
class Department(db.Entity):
number = PrimaryKey(int, auto=True)
name = Required(str, unique=True)
groups = Set("Group")
courses = Set("Course")
class Group(db.Entity):
number = PrimaryKey(int)
major = Required(str)
dept = Required("Department")
students = Set("Student")
class Course(db.Entity):
name = Required(str)
semester = Required(int)
lect_hours = Required(int)
lab_hours = Required(int)
credits = Required(int)
dept = Required(Department)
students = Set("Student")
PrimaryKey(name, semester)
class Student(db.Entity):
id = PrimaryKey(int, auto=True)
name = Required(str)
dob = Required(date)
tel = Optional(str)
picture = Optional(buffer, lazy=True)
gpa = Required(float, default=0)
phd = Optional(bool)
group = Required(Group)
courses = Set(Course)
class TestSQLTranslator2(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_database(db)
with db_session:
d1 = Department(number=1, name="Department of Computer Science")
d2 = Department(number=2, name="Department of Mathematical Sciences")
d3 = Department(number=3, name="Department of Applied Physics")
c1 = Course(name="Web Design", semester=1, dept=d1,
lect_hours=30, lab_hours=30, credits=3)
c2 = Course(name="Data Structures and Algorithms", semester=3, dept=d1,
lect_hours=40, lab_hours=20, credits=4)
c3 = Course(name="Linear Algebra", semester=1, dept=d2,
lect_hours=30, lab_hours=30, credits=4)
c4 = Course(name="Statistical Methods", semester=2, dept=d2,
lect_hours=50, lab_hours=25, credits=5)
c5 = Course(name="Thermodynamics", semester=2, dept=d3,
lect_hours=25, lab_hours=40, credits=4)
c6 = Course(name="Quantum Mechanics", semester=3, dept=d3,
lect_hours=40, lab_hours=30, credits=5)
g101 = Group(number=101, major='B.E. in Computer Engineering', dept=d1)
g102 = Group(number=102, major='B.S./M.S. in Computer Science', dept=d2)
g103 = Group(number=103, major='B.S. in Applied Mathematics and Statistics', dept=d2)
g104 = Group(number=104, major='B.S./M.S. in Pure Mathematics', dept=d2)
g105 = Group(number=105, major='B.E in Electronics', dept=d3)
g106 = Group(number=106, major='B.S./M.S. in Nuclear Engineering', dept=d3)
Student(id=1, name='John Smith', dob=date(1991, 3, 20), tel='123-456', gpa=3, group=g101, phd=True,
courses=[c1, c2, c4, c6])
Student(id=2, name='Matthew Reed', dob=date(1990, 11, 26), gpa=3.5, group=g101, phd=True,
courses=[c1, c3, c4, c5])
Student(id=3, name='Chuan Qin', dob=date(1989, 2, 5), gpa=4, group=g101,
courses=[c3, c5, c6])
Student(id=4, name='Rebecca Lawson', dob=date(1990, 4, 18), tel='234-567', gpa=3.3, group=g102,
courses=[c1, c4, c5, c6])
Student(id=5, name='Maria Ionescu', dob=date(1991, 4, 23), gpa=3.9, group=g102,
courses=[c1, c2, c4, c6])
Student(id=6, name='Oliver Blakey', dob=date(1990, 9, 8), gpa=3.1, group=g102,
courses=[c1, c2, c5])
Student(id=7, name='Jing Xia', dob=date(1988, 12, 30), gpa=3.2, group=g102,
courses=[c1, c3, c5, c6])
@classmethod
def tearDownClass(cls):
teardown_database(db)
def setUp(self):
rollback()
db_session.__enter__()
def tearDown(self):
rollback()
db_session.__exit__()
def test_distinct1(self):
q = select(c.students for c in Course)
self.assertEqual(q._translator.distinct, True)
self.assertEqual(q.count(), 7)
def test_distinct3(self):
q = select(d for d in Department if len(s for c in d.courses for s in c.students) > len(s for s in Student))
self.assertEqual(q[:], [])
self.assertTrue('DISTINCT' in db.last_sql)
def test_distinct4(self):
q = select(d for d in Department if len(d.groups.students) > 3)
self.assertEqual(q[:], [Department[2]])
self.assertTrue("DISTINCT" not in db.last_sql)
def test_distinct5(self):
result = set(select(s for s in Student))
self.assertEqual(result, {Student[1], Student[2], Student[3], Student[4], Student[5], Student[6], Student[7]})
def test_distinct6(self):
result = set(select(s for s in Student).distinct())
self.assertEqual(result, {Student[1], Student[2], Student[3], Student[4], Student[5], Student[6], Student[7]})
def test_not_null1(self):
q = select(g for g in Group if '123-45-67' not in g.students.tel and g.dept == Department[1])
not_null = "IS_NOT_NULL COLUMN student tel" in (" ".join(str(i) for i in flatten(q._translator.conditions)))
self.assertEqual(not_null, True)
self.assertEqual(q[:], [Group[101]])
def test_not_null2(self):
q = select(g for g in Group if 'John' not in g.students.name and g.dept == Department[1])
not_null = "IS_NOT_NULL COLUMN student name" in (" ".join(str(i) for i in flatten(q._translator.conditions)))
self.assertEqual(not_null, False)
self.assertEqual(q[:], [Group[101]])
def test_chain_of_attrs_inside_for1(self):
result = set(select(s for d in Department if d.number == 2 for s in d.groups.students))
self.assertEqual(result, {Student[4], Student[5], Student[6], Student[7]})
def test_chain_of_attrs_inside_for2(self):
pony.options.SIMPLE_ALIASES = False
result = set(select(s for d in Department if d.number == 2 for s in d.groups.students))
self.assertEqual(result, {Student[4], Student[5], Student[6], Student[7]})
pony.options.SIMPLE_ALIASES = True
def test_non_entity_result1(self):
result = select((s.name, s.group.number) for s in Student if s.name.startswith("J"))[:]
self.assertEqual(sorted(result), [(u'Jing Xia', 102), (u'John Smith', 101)])
def test_non_entity_result2(self):
result = select((s.dob.year, s.group.number) for s in Student)[:]
self.assertEqual(sorted(result), [(1988, 102), (1989, 101), (1990, 101), (1990, 102), (1991, 101), (1991, 102)])
def test_non_entity_result3(self):
result = select(s.dob.year for s in Student).without_distinct()
self.assertEqual(sorted(result), [1988, 1989, 1990, 1990, 1990, 1991, 1991])
result = select(s.dob.year for s in Student)[:] # test the last query didn't override the cached one
self.assertEqual(sorted(result), [1988, 1989, 1990, 1991])
def test_non_entity_result3a(self):
result = select(s.dob.year for s in Student)[:]
self.assertEqual(sorted(result), [1988, 1989, 1990, 1991])
def test_non_entity_result4(self):
result = set(select(s.name for s in Student if s.name.startswith('M')))
self.assertEqual(result, {u'Matthew Reed', u'Maria Ionescu'})
def test_non_entity_result5(self):
result = select((s.group, s.dob) for s in Student if s.group == Group[101])[:]
self.assertEqual(sorted(result), [(Group[101], date(1989, 2, 5)), (Group[101], date(1990, 11, 26)), (Group[101], date(1991, 3, 20))])
def test_non_entity_result6(self):
result = select((c, s) for s in Student for c in Course if c.semester == 1 and s.id < 3)[:]
self.assertEqual(sorted(result), sorted([(Course[u'Linear Algebra',1], Student[1]), (Course[u'Linear Algebra',1],
Student[2]), (Course[u'Web Design',1], Student[1]), (Course[u'Web Design',1], Student[2])]))
def test_non_entity7(self):
result = set(select(s for s in Student if (s.name, s.dob) not in (((s2.name, s2.dob) for s2 in Student if s.group.number == 101))))
self.assertEqual(result, {Student[4], Student[5], Student[6], Student[7]})
@raises_exception(IncomparableTypesError, "Incomparable types 'int' and 'Set of Student' in expression: g.number == g.students")
def test_incompartible_types(self):
select(g for g in Group if g.number == g.students)
@raises_exception(TranslationError, "External parameter 'x' cannot be used as query result")
def test_external_param1(self):
x = Student[1]
select(x for s in Student)
def test_external_param2(self):
x = Student[1]
result = set(select(s for s in Student if s.name != x.name))
self.assertEqual(result, {Student[2], Student[3], Student[4], Student[5], Student[6], Student[7]})
@raises_exception(TypeError, "Use select(...) function or Group.select(...) method for iteration")
def test_exception1(self):
for g in Group:
pass
@raises_exception(MultipleObjectsFoundError, "Multiple objects were found. Use select(...) to retrieve them")
def test_exception2(self):
get(s for s in Student)
def test_exists(self):
result = exists(s for s in Student)
@raises_exception(ExprEvalError, "`db.FooBar` raises AttributeError: 'Database' object has no attribute 'FooBar'")
def test_entity_not_found(self):
select(s for s in db.Student for g in db.FooBar)
def test_keyargs1(self):
result = set(select(s for s in Student if s.dob < date(year=1990, month=10, day=20)))
self.assertEqual(result, {Student[3], Student[4], Student[6], Student[7]})
def test_query_as_string1(self):
result = set(select('s for s in Student if 3 <= s.gpa < 4'))
self.assertEqual(result, {Student[1], Student[2], Student[4], Student[5], Student[6], Student[7]})
def test_query_as_string2(self):
result = set(select('s for s in db.Student if 3 <= s.gpa < 4'))
self.assertEqual(result, {Student[1], Student[2], Student[4], Student[5], Student[6], Student[7]})
def test_str_subclasses(self):
result = select(d for d in Department for g in d.groups for c in d.courses if g.number == 106 and c.name.startswith('T'))[:]
self.assertEqual(result, [Department[3]])
def test_unicode_subclass(self):
class Unicode2(str):
pass
u2 = Unicode2(u'\xf0')
select(s for s in Student if len(u2) == 1)
def test_bool(self):
result = set(select(s for s in Student if s.phd == True))
self.assertEqual(result, {Student[1], Student[2]})
def test_bool2(self):
result = list(select(s for s in Student if s.phd + 1 == True))
self.assertEqual(result, [])
def test_bool3(self):
result = list(select(s for s in Student if s.phd + 1.1 == True))
self.assertEqual(result, [])
def test_bool4(self):
result = list(select(s for s in Student if s.phd + Decimal('1.1') == True))
self.assertEqual(result, [])
def test_bool5(self):
x = True
result = set(select(s for s in Student if s.phd == True and (False or (True and x))))
self.assertEqual(result, {Student[1], Student[2]})
def test_bool6(self):
x = False
result = list(select(s for s in Student if s.phd == (False or (True and x)) and s.phd is True))
self.assertEqual(result, [])
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
#
# seL4 System Call Stub Generator
# ===============================
#
# 2009 David Greenaway
#
# This script generates system call stubs based on an XML specification of the
# objects that the kernel exports (and the methods those objects export).
#
# Previously, Magpie (an IDL compiler) was used to generate these stubs. As
# Magpie development progressed, support for a fixed ABI (i.e., the ABI
# implemented by the seL4 kernel) was lost, and support for generating
# alignment-safe code (required by platforms such as ARM) was also removed.
#
# This script is a stop-gap until these features can be restored in Magpie
# once again.
#
# The script has certain limitations:
#
# * It must be told the size of all types. This includes complex types
# such as structures.
#
# We generate code that will cause compilation to fail if we get any
# object's size wrong, which should help mitigate the number of bugs caused
# because of this script becoming out of date compared to the source files.
#
# * The word-size is fixed at 32 bits, and we may implicitly assume that
# sizeof(int) == sizeof(long) == 32.
#
# Though the constant 'WORD_SIZE_BITS' has been used throughout, there
# may be implicit assumptions hanging around causing things to fail.
#
# * The script has only been tested on the actual seL4 API XML description.
#
# No stress testing has taken place; there may be bugs if new and wonderful
# XML method descriptions are added.
#
import xml.dom.minidom
import optparse
# Number of bits in a standard word
WORD_SIZE_BITS = 32
# Maximum number of words that will be in a message.
MAX_MESSAGE_LENGTH = 32
MESSAGE_REGISTERS_FOR_ARCH = {
"arm": 4,
"ia32": 2,
}
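# Parameters that do not fit into the architecture's message registers are
# written to the IPC buffer with seL4_SetMR() instead.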
class Type(object):
"""
This class represents a C type (such as an 'int', structure or
    pointer).
"""
def __init__(self, name, size_bits, double_word=False, native_size_bits=None):
"""
Define a new type, named 'name' that is 'size_bits' bits
long.
"""
self.name = name
self.size_bits = size_bits
self.double_word = double_word
#
# Store the number of bits C will use for this type
# in its native unpacked form.
#
# Required for 'bool', for example, which only uses 1
        # bit when packed, but 8 bits when unpacked.
#
if native_size_bits:
self.native_size_bits = native_size_bits
else:
self.native_size_bits = size_bits
def pass_by_reference(self):
return self.size_bits > WORD_SIZE_BITS and not self.double_word
def render_parameter_name(self, name):
"""
Return a string of C code that would be used in a function
parameter declaration.
"""
return "%s %s" % (self.name, name)
def pointer(self):
"""
Return a new Type class representing a pointer to this
object.
"""
return PointerType(self)
def c_expression(self, var_name, word_num=0):
"""
Return code for a C expression that gets word 'word_num'
of this type.
"""
assert word_num == 0
return "%s" % var_name
def double_word_expression(self, var_name, word_num):
assert(word_num == 0 or word_num == 1)
if word_num == 0:
return "(uint{0}_t) {1}".format(WORD_SIZE_BITS, var_name)
elif word_num == 1:
return "(uint{0}_t) ({1} >> {0})".format(WORD_SIZE_BITS, var_name)
class PointerType(Type):
"""
A pointer to a standard type.
"""
def __init__(self, base_type):
Type.__init__(self, base_type.name, WORD_SIZE_BITS)
self.base_type = base_type
def render_parameter_name(self, name):
return "%s *%s" % (self.name, name)
def c_expression(self, var_name, word_num=0):
assert word_num == 0
return "*%s" % var_name
def pointer(self):
raise NotImplementedError()
class CapType(Type):
"""
A type that is just a typedef of seL4_CPtr.
"""
def __init__(self, name):
Type.__init__(self, name, WORD_SIZE_BITS)
class StructType(Type):
"""
A C 'struct' definition.
"""
def __init__(self, name, size_bits):
Type.__init__(self, name, size_bits)
def c_expression(self, var_name, word_num, member_name):
assert word_num < self.size_bits / WORD_SIZE_BITS
# Multiword structure.
assert self.pass_by_reference()
return "%s->%s" % (var_name, member_name[word_num])
class BitFieldType(Type):
"""
A special C 'struct' generated by the bitfield generator
"""
def __init__(self, name, size_bits):
Type.__init__(self, name, size_bits)
def c_expression(self, var_name, word_num=0):
return "%s.words[%d]" % (var_name, word_num)
class Parameter(object):
def __init__(self, name, type):
self.name = name
self.type = type
#
# Return the size (in bits) of a particular type.
#
types = [
# Simple Types
Type("uint8_t", 8),
Type("uint16_t", 16),
Type("uint32_t", 32),
Type("uint64_t", 64, double_word=True),
Type("int", WORD_SIZE_BITS),
Type("bool", 1, native_size_bits=8),
Type("seL4_Word", WORD_SIZE_BITS),
Type("seL4_CapRights", WORD_SIZE_BITS),
# seL4 Structures
BitFieldType("seL4_CapData_t", WORD_SIZE_BITS),
# Object types
CapType("seL4_CPtr"),
CapType("seL4_CNode"),
CapType("seL4_IRQHandler"),
CapType("seL4_IRQControl"),
CapType("seL4_TCB"),
CapType("seL4_Untyped"),
CapType("seL4_DomainSet"),
]
#
# Arch-specific types.
#
arch_types = {
"arm" : [
Type("seL4_ARM_VMAttributes", WORD_SIZE_BITS),
CapType("seL4_ARM_Page"),
CapType("seL4_ARM_PageTable"),
CapType("seL4_ARM_PageDirectory"),
CapType("seL4_ARM_ASIDControl"),
CapType("seL4_ARM_ASIDPool"),
StructType("seL4_UserContext", WORD_SIZE_BITS * 17),
],
"ia32" : [
Type("seL4_IA32_VMAttributes", WORD_SIZE_BITS),
CapType("seL4_IA32_ASIDControl"),
CapType("seL4_IA32_ASIDPool"),
CapType("seL4_IA32_IOSpace"),
CapType("seL4_IA32_IOPort"),
CapType("seL4_IA32_Page"),
CapType("seL4_IA32_PageDirectory"),
CapType("seL4_IA32_PageTable"),
CapType("seL4_IA32_IOPageTable"),
StructType("seL4_UserContext", WORD_SIZE_BITS * 13),
]
}
# Retrieve a member list for a given struct type
def struct_members(type, structs):
members = [member for struct_name, member in structs if struct_name == type.name]
assert len(members) == 1
return members[0]
# Keep increasing the given number 'x' until 'x % a == 0'.
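# For example, align_up(13, 8) == 16 and align_up(16, 8) == 16.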
def align_up(x, a):
if x % a == 0:
return x
return x + a - (x % a)
def get_parameter_positions(parameters):
"""
Determine where each parameter should be packed in the generated message.
We generate a list of:
(param_name, param_type, first_bit, num_bits)
tuples.
We guarantee that either (num_words == 1) or (bit_offset == 0).
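    For example, packing (uint8_t a, uint8_t b, seL4_Word c) places 'a' at
    bits 0..7 of word 0, 'b' at bits 8..15 of word 0, and 'c' in word 1,
    since full-word values are aligned up to the next word boundary.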
"""
words_used = 0
bits_used = 0
results = []
for param in parameters:
# How big are we?
type_size = param.type.size_bits
# We need everything to be a power of two, or word sized.
assert ((type_size & (type_size - 1)) == 0) or (type_size % WORD_SIZE_BITS == 0)
# Align up to our own size, or the next word. (Whichever is smaller)
bits_used = align_up(bits_used, min(type_size, WORD_SIZE_BITS))
# Place ourself.
results.append((param, bits_used, type_size))
bits_used += type_size
return results
def generate_param_list(input_params, output_params):
# Generate parameters
params = []
for param in input_params:
if not param.type.pass_by_reference():
params.append(param.type.render_parameter_name(param.name))
else:
params.append(param.type.pointer().render_parameter_name(param.name))
for param in output_params:
if param.type.pass_by_reference():
params.append(param.type.pointer().render_parameter_name(param.name))
return ", ".join(params)
def generate_marshal_expressions(params, num_mrs, structs):
"""
Generate marshalling expressions for the given set of inputs.
We return a list of expressions; one expression per word required
to marshal all the inputs.
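    For example, two uint8_t inputs 'x' and 'y' produce the single
    expression "(x & 0xff) | ((y & 0xff) << 8)", which generate_stub later
    assigns to a message register or passes via seL4_SetMR().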
"""
def generate_param_code(param, first_bit, num_bits, word_array):
"""
Generate code to marshal the given parameter into the correct
location in the message.
'word_array' is an array of the final contents of the message.
word_array[k] contains what should be placed in the k'th message
register, and is an array of expressions that will (eventually)
be bitwise-or'ed into it.
"""
target_word = first_bit / WORD_SIZE_BITS
target_offset = first_bit % WORD_SIZE_BITS
# double word type
if param.type.double_word:
word_array[target_word].append(param.type.double_word_expression(param.name, 0))
word_array[target_word + 1].append(param.type.double_word_expression(param.name, 1))
return
# Single full word?
if num_bits == WORD_SIZE_BITS:
assert target_offset == 0
expr = param.type.c_expression(param.name);
word_array[target_word].append(expr)
return
# Part of a word?
if num_bits < WORD_SIZE_BITS:
expr = param.type.c_expression(param.name);
expr = "(%s & %#x)" % (expr, (1 << num_bits) - 1)
if target_offset:
expr = "(%s << %d)" % (expr, target_offset)
word_array[target_word].append(expr)
return
# Multiword array
assert target_offset == 0
num_words = num_bits / WORD_SIZE_BITS
for i in range(num_words):
expr = param.type.c_expression(param.name, i, struct_members(param.type, structs));
word_array[target_word + i].append(expr)
# Get their marshalling positions
positions = get_parameter_positions(params)
# Generate marshal code.
words = [[] for _ in range(num_mrs, MAX_MESSAGE_LENGTH)]
for (param, first_bit, num_bits) in positions:
generate_param_code(param, first_bit, num_bits, words)
# Return list of expressions.
return [" | ".join(x) for x in words if len(x) > 0]
def generate_unmarshal_expressions(params):
"""
Generate unmarshalling expressions for the given set of outputs.
We return a list of list of expressions; one list per variable, containing
expressions for the words in it that must be unmarshalled. The expressions
will have tokens of the form:
"%(w0)s"
in them, indicating a read from a word in the message.
"""
def unmarshal_single_param(first_bit, num_bits):
"""
Unmarshal a single parameter.
"""
first_word = first_bit / WORD_SIZE_BITS
bit_offset = first_bit % WORD_SIZE_BITS
# Multiword type?
if num_bits > WORD_SIZE_BITS:
result = []
for x in range(num_bits / WORD_SIZE_BITS):
result.append("%%(w%d)s" % (x + first_word))
return result
# Otherwise, bit packed.
if num_bits == WORD_SIZE_BITS:
return ["%%(w%d)s" % first_word]
elif bit_offset == 0:
return ["(%%(w%d)s & %#x)" % (
first_word, (1 << num_bits) - 1)]
else:
return ["(%%(w%d)s >> %d) & %#x" % (
first_word, bit_offset, (1 << num_bits) - 1)]
# Get their marshalling positions
positions = get_parameter_positions(params)
# Generate the unmarshal code.
results = []
for (param, first_bit, num_bits) in positions:
results.append((param, unmarshal_single_param(first_bit, num_bits)))
return results
def generate_result_struct(interface_name, method_name, output_params):
"""
Generate a structure definition to be returned by the system call stubs to
the user.
We have a few constraints:
* We always need an 'error' output parameter, even though it won't
appear in the list 'output_params' given to us.
* Output parameters may be marked as 'pass_by_reference', indicating
that we only ever see pointers to the item.
If no structure is needed (i.e., we just return an error code), we return
'None'.
"""
# Do we actually need a structure?
if len([x for x in output_params if not x.type.pass_by_reference()]) == 0:
return None
#
# Generate the structure:
#
# struct seL4_CNode_Copy {
# int error;
# seL4_Word foo;
# };
# typedef struct seL4_CNode_Copy seL4_CNode_Copy_t;
#
result = []
result.append("struct %s_%s {" % (interface_name, method_name))
result.append("\tint error;")
for i in output_params:
if not i.type.pass_by_reference():
result.append("\t%s;" % i.type.render_parameter_name(i.name))
result.append("};")
result.append("typedef struct %s_%s %s_%s_t;" % (
(interface_name, method_name, interface_name, method_name)))
result.append("")
return "\n".join(result)
def generate_stub(arch, interface_name, method_name, method_id, input_params, output_params, structs, use_only_ipc_buffer):
result = []
if use_only_ipc_buffer:
num_mrs = 0
else:
num_mrs = MESSAGE_REGISTERS_FOR_ARCH[arch]
# Split out cap parameters and standard parameters
standard_params = []
cap_params = []
for x in input_params:
if isinstance(x.type, CapType):
cap_params.append(x)
else:
standard_params.append(x)
# Determine if we are returning a structure, or just the error code.
returning_struct = False
results_structure = generate_result_struct(interface_name, method_name, output_params)
if results_structure:
return_type = "%s_%s_t" % (interface_name, method_name)
returning_struct = True
else:
return_type = "int"
#
# Print function header.
#
# static inline int
# seL4_Untyped_Retype(...)
# {
#
result.append("static inline %s" % return_type)
result.append("%s_%s(%s)" % (interface_name, method_name,
generate_param_list(input_params, output_params)))
result.append("{")
#
# Get a list of expressions for our caps and inputs.
#
input_expressions = generate_marshal_expressions(standard_params, num_mrs, structs)
cap_expressions = [x.name for x in cap_params]
service_cap = cap_expressions[0]
cap_expressions = cap_expressions[1:]
#
# Compute how many words the inputs and output will require.
#
input_param_words = len(input_expressions)
output_param_words = sum([p.type.size_bits for p in output_params]) / WORD_SIZE_BITS
#
# Setup variables we will need.
#
if returning_struct:
result.append("\t%s result;" % return_type)
result.append("\tseL4_MessageInfo_t tag = seL4_MessageInfo_new(%s, 0, %d, %d);" % (method_id, len(cap_expressions), len(input_expressions)))
result.append("\tseL4_MessageInfo_t output_tag;")
for i in range(min(num_mrs, max(input_param_words, output_param_words))):
result.append("\tseL4_Word mr%d;" % i)
result.append("")
#
# Copy capabilities.
#
# /* Setup input capabilities. */
# seL4_SetCap(i, cap);
#
if len(cap_expressions) > 0:
result.append("\t/* Setup input capabilities. */")
for i in range(len(cap_expressions)):
result.append("\tseL4_SetCap(%d, %s);" % (i, cap_expressions[i]))
result.append("")
#
# Copy in the inputs.
#
# /* Marshal input parameters. */
# seL4_SetMR(i, v);
# ...
#
if len(input_expressions) > 0:
result.append("\t/* Marshal input parameters. */")
for i in range(len(input_expressions)):
if i < num_mrs:
result.append("\tmr%d = %s;" % (i, input_expressions[i]))
else:
result.append("\tseL4_SetMR(%d, %s);" % (i, input_expressions[i]))
result.append("")
#
# Generate the call.
#
call_arguments = []
for i in range(num_mrs):
if i < max(input_param_words, output_param_words):
call_arguments.append("&mr%d" % i)
else:
call_arguments.append("NULL")
if use_only_ipc_buffer:
result.append("\t/* Perform the call. */")
result.append("\toutput_tag = seL4_Call(%s, tag);" % service_cap)
else:
result.append("\t/* Perform the call, passing in-register arguments directly. */")
result.append("\toutput_tag = seL4_CallWithMRs(%s, tag," % (service_cap))
result.append("\t\t%s);" % ', '.join(
[call_arguments[i] for i in range(num_mrs)]))
result.append("")
#
# Generate unmarshalling code.
#
if len(output_params) > 0:
result.append("\t/* Unmarshal result. */")
source_words = {}
for i in range(MAX_MESSAGE_LENGTH):
if i < num_mrs:
source_words["w%d" % i] = "mr%d" % i;
else:
source_words["w%d" % i] = "seL4_GetMR(%d)" % i;
unmarshalled_params = generate_unmarshal_expressions(output_params)
for (param, words) in unmarshalled_params:
if param.type.pass_by_reference():
members = struct_members(param.type, structs)
for i in range(len(words)):
result.append("\t%s->%s = %s;" % (param.name, members[i], words[i] % source_words))
else:
if param.type.double_word:
result.append("\tresult.%s = ((uint64_t)%s + ((uint64_t)%s << 32));" % (param.name, words[0] % source_words, words[1] % source_words))
else:
for word in words:
result.append("\tresult.%s = %s;" % (param.name, word % source_words))
result.append("")
# Return result
if returning_struct:
result.append("\tresult.error = seL4_MessageInfo_get_label(output_tag);")
result.append("\treturn result;")
else:
result.append("\treturn seL4_MessageInfo_get_label(output_tag);")
#
# }
#
result.append("}")
return "\n".join(result) + "\n"
def parse_xml_file(input_file, valid_types):
"""
Parse an XML file containing method definitions.
"""
# Create a dictionary of type name to type.
type_names = {}
for i in valid_types:
type_names[i.name] = i
# Parse the XML to generate method structures.
methods = []
structs = []
doc = xml.dom.minidom.parse(input_file)
for struct in doc.getElementsByTagName("struct"):
struct_members = []
struct_name = struct.getAttribute("name")
for members in struct.getElementsByTagName("member"):
member_name = members.getAttribute("name")
struct_members.append(member_name)
structs.append( (struct_name, struct_members) )
for interface in doc.getElementsByTagName("interface"):
interface_name = interface.getAttribute("name")
for method in interface.getElementsByTagName("method"):
method_name = method.getAttribute("name")
method_id = method.getAttribute("id")
#
# Get parameters.
#
# We always have an implicit cap parameter.
#
input_params = [Parameter("service", type_names[interface_name])]
output_params = []
for param in method.getElementsByTagName("param"):
param_name = param.getAttribute("name")
param_type = type_names.get(param.getAttribute("type"))
if not param_type:
raise Exception("Unknown type '%s'." % (param.getAttribute("type")))
param_dir = param.getAttribute("dir")
assert (param_dir == "in") or (param_dir == "out")
if (param_dir == "in"):
input_params.append(Parameter(param_name, param_type))
else:
output_params.append(Parameter(param_name, param_type))
methods.append((interface_name, method_name, method_id, input_params, output_params))
return (methods, structs)
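# For reference, the XML consumed above is expected to look roughly like this
# (names and ids are illustrative only):
#
#   <struct name="seL4_UserContext">
#     <member name="pc"/>
#     <member name="sp"/>
#   </struct>
#
#   <interface name="seL4_CNode">
#     <method name="Copy" id="5">
#       <param dir="in"  name="dest_index" type="seL4_Word"/>
#       <param dir="out" name="result"     type="seL4_Word"/>
#     </method>
#   </interface>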
def generate_stub_file(arch, input_files, output_file, use_only_ipc_buffer):
"""
Generate a header file containing system call stubs for seL4.
"""
result = []
# Ensure architecture looks sane.
if arch not in arch_types:
raise Exception("Invalid architecture. Expected %s." %
" or ".join(arch_types.keys()))
# Parse XML
methods = []
structs = []
for input_file in input_files:
method, struct = parse_xml_file(input_file, types + arch_types[arch])
methods += method
structs += struct
# Print header.
result.append("""
/*
* Automatically generated system call stubs.
*/
#ifndef __LIBSEL4_SEL4_CLIENT_H
#define __LIBSEL4_SEL4_CLIENT_H
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include "sel4/types.h"
#include "sel4/invocation.h"
#include "sel4/arch/functions.h"
#include "sel4/arch/syscalls.h"
""");
#
# Emit code to ensure that all of our type sizes are consistent with
# the compiler's.
#
result.append("""
/*
* The following code generates a compile-time error if the system call
* stub generator has an incorrect understanding of how large a type is.
*
* If you receive a compile-time error here, you will need to adjust
* the type information in the stub generator.
*/
#define assert_size_correct(type, expected_bytes) \\
typedef unsigned long __type_##type##_size_incorrect[ \\
(sizeof(type) == expected_bytes) ? 1 : -1]
""")
for x in types + arch_types[arch]:
result.append("assert_size_correct(%s, %d);" % (x.name, x.native_size_bits / 8))
result.append("")
#
# Generate structures needed to return results back to the user.
#
# We can not use pass-by-reference (except for really large objects), as
# the verification framework does not support them.
#
result.append("/*")
result.append(" * Return types for generated methods.")
result.append(" */")
for (interface_name, method_name, _, _, output_params) in methods:
results_structure = generate_result_struct(interface_name, method_name, output_params)
if results_structure:
result.append(results_structure)
#
# Generate the actual stub code.
#
result.append("/*")
result.append(" * Generated stubs.")
result.append(" */")
for (interface_name, method_name, method_id, inputs, outputs) in methods:
result.append(generate_stub(arch, interface_name, method_name,
method_id, inputs, outputs, structs, use_only_ipc_buffer))
# Print footer.
result.append("#endif /* __LIBSEL4_SEL4_CLIENT_H */")
result.append("")
# Write the output
output = open(output_file, "w")
output.write("\n".join(result))
output.close()
def main():
#
# Read command line arguments.
#
parser = optparse.OptionParser(
usage = "usage: %prog -a <arch> [-o <ouput file] <input XML> [<input XML> ...]")
parser.add_option("-a", "--arch",
dest="arch", help="Architecture to generate stubs for.")
parser.add_option("-o", "--output",
dest="output", help="Output file to write stub to.")
parser.add_option("-b", "--buffer", action="store_true",
help="Use IPC buffer exclusively (i.e. do not pass syscall "
"arguments by registers).")
(options, args) = parser.parse_args()
# Validate arguments
if len(args) < 1:
parser.error("Require at least one input file.")
if not options.arch:
parser.error("Require an architecture to be specified.")
if not options.output:
options.output = "/dev/stdout"
input_files = args
# Generate the stubs.
generate_stub_file(options.arch, input_files, options.output, options.buffer)
if __name__ == "__main__":
main()
|
|
"""This platform enables the possibility to control a MQTT alarm."""
import logging
import re
import voluptuous as vol
from homeassistant.components import mqtt
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
CONF_CODE,
CONF_DEVICE,
CONF_NAME,
CONF_VALUE_TEMPLATE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_UNIQUE_ID,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_CODE_DISARM_REQUIRED = "code_disarm_required"
CONF_PAYLOAD_DISARM = "payload_disarm"
CONF_PAYLOAD_ARM_HOME = "payload_arm_home"
CONF_PAYLOAD_ARM_AWAY = "payload_arm_away"
CONF_PAYLOAD_ARM_NIGHT = "payload_arm_night"
CONF_COMMAND_TEMPLATE = "command_template"
DEFAULT_COMMAND_TEMPLATE = "{{action}}"
DEFAULT_ARM_NIGHT = "ARM_NIGHT"
DEFAULT_ARM_AWAY = "ARM_AWAY"
DEFAULT_ARM_HOME = "ARM_HOME"
DEFAULT_DISARM = "DISARM"
DEFAULT_NAME = "MQTT Alarm"
PLATFORM_SCHEMA = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
vol.Optional(CONF_CODE_DISARM_REQUIRED, default=True): cv.boolean,
vol.Optional(
CONF_COMMAND_TEMPLATE, default=DEFAULT_COMMAND_TEMPLATE
): cv.template,
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_NIGHT, default=DEFAULT_ARM_NIGHT): cv.string,
vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Required(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
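# A minimal, illustrative configuration.yaml entry for this platform (topic
# names and code are placeholders, not defaults):
#
#   alarm_control_panel:
#     - platform: mqtt
#       name: "House Alarm"
#       state_topic: "home/alarm"
#       command_topic: "home/alarm/set"
#       code: "1234"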
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT alarm control panel through configuration.yaml."""
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT alarm control panel dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT alarm control panel."""
discovery_hash = None
try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
config, async_add_entities, config_entry, discovery_hash
)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(alarm.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
config, async_add_entities, config_entry=None, discovery_hash=None
):
"""Set up the MQTT Alarm Control Panel platform."""
async_add_entities([MqttAlarm(config, config_entry, discovery_hash)])
class MqttAlarm(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
alarm.AlarmControlPanel,
):
"""Representation of a MQTT alarm status."""
def __init__(self, config, config_entry, discovery_hash):
"""Init the MQTT Alarm Control Panel."""
self._state = None
self._config = config
self._unique_id = config.get(CONF_UNIQUE_ID)
self._sub_state = None
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe mqtt events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._config = config
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
command_template = self._config[CONF_COMMAND_TEMPLATE]
command_template.hass = self.hass
@callback
def message_received(msg):
"""Run when new MQTT message has been received."""
payload = msg.payload
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(
msg.payload, self._state
)
if payload not in (
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
):
_LOGGER.warning("Received unexpected payload: %s", msg.payload)
return
self._state = payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._config[CONF_NAME]
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def code_format(self):
"""Return one or more digits/characters."""
code = self._config.get(CONF_CODE)
if code is None:
return None
if isinstance(code, str) and re.search("^\\d+$", code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
return code_required
async def async_alarm_disarm(self, code=None):
"""Send disarm command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_DISARM_REQUIRED]
if code_required and not self._validate_code(code, "disarming"):
return
payload = self._config[CONF_PAYLOAD_DISARM]
self._publish(code, payload)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming home"):
return
action = self._config[CONF_PAYLOAD_ARM_HOME]
self._publish(code, action)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming away"):
return
action = self._config[CONF_PAYLOAD_ARM_AWAY]
self._publish(code, action)
async def async_alarm_arm_night(self, code=None):
"""Send arm night command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming night"):
return
action = self._config[CONF_PAYLOAD_ARM_NIGHT]
self._publish(code, action)
def _publish(self, code, action):
"""Publish via mqtt."""
command_template = self._config[CONF_COMMAND_TEMPLATE]
values = {"action": action, "code": code}
payload = command_template.async_render(**values)
mqtt.async_publish(
self.hass,
self._config[CONF_COMMAND_TOPIC],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
def _validate_code(self, code, state):
"""Validate given code."""
conf_code = self._config.get(CONF_CODE)
check = conf_code is None or code == conf_code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
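# Illustrative use of the command template: _publish() above renders it with
# the variables ``action`` and ``code``, so a configuration such as
#
#   command_template: '{"action": "{{ action }}", "code": "{{ code }}"}'
#
# would publish a small JSON payload instead of the bare action string.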
|
|
# Note: These views are viewable only by people in the analyzers
# group. They should all have the analyzer_required decorator.
#
# Also, because we have this weird thing with new users,
# they should also have the check_new_user decorator.
#
# Thus each view should be structured like this:
#
# @check_new_user
# @analyzer_required
# def my_view(request):
# ...
#
# Also, since it's just analyzers, everyone is expected to speak
# English. Ergo, there's no localization here.
from collections import defaultdict
from datetime import date, datetime, timedelta
from pprint import pformat
import csv
from elasticutils.contrib.django import F, es_required_or_50x
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.utils.encoding import force_bytes
from django.views.generic.edit import FormView
from django.utils.decorators import method_decorator
from fjord.analytics.forms import OccurrencesComparisonForm, ProductsUpdateForm
from fjord.analytics.utils import (
counts_to_options,
zero_fill)
from fjord.base.helpers import locale_name
from fjord.base.utils import (
analyzer_required,
check_new_user,
smart_int,
smart_date,
smart_str
)
from fjord.feedback.helpers import country_name
from fjord.feedback.models import Product, Response, ResponseMappingType
from fjord.heartbeat.models import Answer, Survey
from fjord.journal.models import Record
from fjord.search.utils import es_error_statsd
@check_new_user
@analyzer_required
def hb_data(request, answerid=None):
"""View for hb data that shows one or all of the answers"""
sortby = 'id'
answer = None
answers = []
survey = None
if answerid is not None:
answer = Answer.objects.get(id=answerid)
else:
sortby = request.GET.get('sortby', sortby)
page = request.GET.get('page')
answers = Answer.objects.order_by('-' + sortby)
survey = request.GET.get('survey', survey)
if survey:
try:
survey = Survey.objects.get(id=survey)
answers = answers.filter(survey_id=survey)
except Survey.DoesNotExist:
survey = None
paginator = Paginator(answers, 100)
try:
answers = paginator.page(page)
except PageNotAnInteger:
answers = paginator.page(1)
except EmptyPage:
answers = paginator.page(paginator.num_pages)
def fix_ts(ts):
ts = float(ts) / 1000
return datetime.fromtimestamp(ts)
return render(request, 'analytics/analyzer/hb_data.html', {
'sortby': sortby,
'answer': answer,
'answers': answers,
'fix_ts': fix_ts,
'pformat': pformat,
'survey': survey,
'surveys': Survey.objects.all(),
})
@check_new_user
@analyzer_required
def hb_surveys(request, answerid=None):
"""View for hb that shows Survey objects"""
page = request.GET.get('page')
paginator = Paginator(Survey.objects.order_by('-created'), 25)
try:
surveys = paginator.page(page)
except PageNotAnInteger:
surveys = paginator.page(1)
except EmptyPage:
surveys = paginator.page(paginator.num_pages)
return render(request, 'analytics/analyzer/hb_surveys.html', {
'surveys': surveys
})
@check_new_user
@analyzer_required
def hb_errorlog(request, errorid=None):
"""View for hb errorlog that shows one or all of the errors"""
error = None
errors = []
if errorid is not None:
error = Record.objects.get(id=errorid)
else:
page = request.GET.get('page')
paginator = Paginator(
Record.objects.filter(app='heartbeat').order_by('-id'), 100)
try:
errors = paginator.page(page)
except PageNotAnInteger:
errors = paginator.page(1)
except EmptyPage:
errors = paginator.page(paginator.num_pages)
return render(request, 'analytics/analyzer/hb_errorlog.html', {
'error': error,
'errors': errors,
'pformat': pformat
})
@check_new_user
@analyzer_required
def analytics_dashboard(request):
"""Main page for analytics related things"""
template = 'analytics/analyzer/dashboard.html'
return render(request, template)
@check_new_user
@analyzer_required
@es_required_or_50x(error_template='analytics/es_down.html')
@es_error_statsd
def analytics_occurrences(request):
template = 'analytics/analyzer/occurrences.html'
first_facet_bi = None
first_params = {}
first_facet_total = 0
second_facet_bi = None
second_params = {}
second_facet_total = 0
if 'product' in request.GET:
form = OccurrencesComparisonForm(request.GET)
if form.is_valid():
cleaned = form.cleaned_data
# First item
first_resp_s = (ResponseMappingType.search()
.filter(product=cleaned['product'])
.filter(locale__startswith='en'))
first_params['product'] = cleaned['product']
if cleaned['first_version']:
first_resp_s = first_resp_s.filter(
version=cleaned['first_version'])
first_params['version'] = cleaned['first_version']
if cleaned['first_start_date']:
first_resp_s = first_resp_s.filter(
created__gte=cleaned['first_start_date'])
first_params['date_start'] = cleaned['first_start_date']
if cleaned['first_end_date']:
first_resp_s = first_resp_s.filter(
created__lte=cleaned['first_end_date'])
first_params['date_end'] = cleaned['first_end_date']
if cleaned['first_search_term']:
first_resp_s = first_resp_s.query(
description__match=cleaned['first_search_term'])
first_params['q'] = cleaned['first_search_term']
if ('date_start' not in first_params
and 'date_end' not in first_params):
# FIXME - If there's no start date, then we want
# "everything" so we use a hard-coded 2013-01-01 date
# here to hack that.
#
# Better way might be to change the dashboard to allow
# for an "infinite" range, but there's no other use
# case for that and the ranges are done in the ui--not
# in the backend.
first_params['date_start'] = '2013-01-01'
first_resp_s = first_resp_s.facet('description_bigrams',
size=30, filtered=True)
first_resp_s = first_resp_s[0:0]
first_facet_total = first_resp_s.count()
first_facet = first_resp_s.facet_counts()
first_facet_bi = first_facet['description_bigrams']
first_facet_bi = sorted(
first_facet_bi, key=lambda item: -item['count'])
if (cleaned['second_version']
or cleaned['second_search_term']
or cleaned['second_start_date']):
second_resp_s = (ResponseMappingType.search()
.filter(product=cleaned['product'])
.filter(locale__startswith='en'))
second_params['product'] = cleaned['product']
if cleaned['second_version']:
second_resp_s = second_resp_s.filter(
version=cleaned['second_version'])
second_params['version'] = cleaned['second_version']
if cleaned['second_start_date']:
second_resp_s = second_resp_s.filter(
created__gte=cleaned['second_start_date'])
second_params['date_start'] = cleaned['second_start_date']
if cleaned['second_end_date']:
second_resp_s = second_resp_s.filter(
created__lte=cleaned['second_end_date'])
second_params['date_end'] = cleaned['second_end_date']
if form.cleaned_data['second_search_term']:
second_resp_s = second_resp_s.query(
description__match=cleaned['second_search_term'])
second_params['q'] = cleaned['second_search_term']
if ('date_start' not in second_params
and 'date_end' not in second_params):
# FIXME - If there's no start date, then we want
# "everything" so we use a hard-coded 2013-01-01 date
# here to hack that.
#
# Better way might be to change the dashboard to allow
# for an "infinite" range, but there's no other use
# case for that and the ranges are done in the ui--not
# in the backend.
second_params['date_start'] = '2013-01-01'
# Have to do raw because we want a size > 10.
second_resp_s = second_resp_s.facet('description_bigrams',
size=30, filtered=True)
second_resp_s = second_resp_s[0:0]
second_facet_total = second_resp_s.count()
second_facet = second_resp_s.facet_counts()
second_facet_bi = second_facet['description_bigrams']
second_facet_bi = sorted(
second_facet_bi, key=lambda item: -item['count'])
permalink = request.build_absolute_uri()
else:
permalink = ''
form = OccurrencesComparisonForm()
# FIXME - We have responses that have no product set. This ignores
# those. That's probably the right thing to do for the Occurrences Report
# but maybe not.
products = [prod for prod in ResponseMappingType.get_products() if prod]
return render(request, template, {
'permalink': permalink,
'form': form,
'products': products,
'first_facet_bi': first_facet_bi,
'first_params': first_params,
'first_facet_total': first_facet_total,
'first_normalization': round(first_facet_total * 1.0 / 1000, 3),
'second_facet_bi': second_facet_bi,
'second_params': second_params,
'second_facet_total': second_facet_total,
'second_normalization': round(second_facet_total * 1.0 / 1000, 3),
'render_time': datetime.now(),
})
@check_new_user
@analyzer_required
@es_required_or_50x(error_template='analytics/es_down.html')
@es_error_statsd
def analytics_duplicates(request):
"""Shows all duplicate descriptions over the last n days"""
template = 'analytics/analyzer/duplicates.html'
n = 14
responses = (ResponseMappingType.search()
.filter(created__gte=datetime.now() - timedelta(days=n))
.values_dict('description', 'happy', 'created', 'locale',
'user_agent', 'id')
.order_by('created').everything())
responses = ResponseMappingType.reshape(responses)
total_count = len(responses)
response_dupes = {}
for resp in responses:
response_dupes.setdefault(resp['description'], []).append(resp)
response_dupes = [
(key, val) for key, val in response_dupes.items()
if len(val) > 1
]
# convert the dict into a list of tuples sorted by the number of
# responses per tuple largest number first
response_dupes = sorted(response_dupes, key=lambda item: len(item[1]) * -1)
# duplicate_count -> count
# i.e. "how many responses had 2 duplicates?"
summary_counts = defaultdict(int)
for desc, responses in response_dupes:
summary_counts[len(responses)] = summary_counts[len(responses)] + 1
summary_counts = sorted(summary_counts.items(), key=lambda item: item[0])
return render(request, template, {
'n': n,
'response_dupes': response_dupes,
'render_time': datetime.now(),
'summary_counts': summary_counts,
'total_count': total_count,
})
def _analytics_search_export(request, opinions_s):
"""Handles CSV export for analytics search
This exports at most MAX_OPINIONS rows; if the results are truncated, a
note saying so is added to the top of the CSV.
"""
MAX_OPINIONS = 1000
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(
datetime.now().strftime('%Y%m%d_%H%M_search_export.csv'))
keys = Response.get_export_keys(confidential=True)
total_opinions = opinions_s.count()
opinions_s = opinions_s.values_list('id')[:MAX_OPINIONS]
# We convert what we get back from ES to what's in the db so we
# can get all the information.
opinions = Response.objects.filter(
id__in=[mem[0][0] for mem in opinions_s])
writer = csv.writer(response)
# Specify what this search is
writer.writerow(['URL: {0}'.format(request.get_full_path())])
writer.writerow(['Params: ' +
' '.join(['{0}: {1}'.format(key, val)
for key, val in request.GET.items()])])
# Add note if we truncated.
if total_opinions > MAX_OPINIONS:
writer.writerow(['Truncated {0} rows.'.format(
total_opinions - MAX_OPINIONS)])
# Write headers row.
writer.writerow(keys)
# Write opinion rows.
for op in opinions:
writer.writerow([force_bytes(getattr(op, key)) for key in keys])
return response
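# The CSV written above starts with a few metadata rows before the header row,
# roughly like this (values are illustrative):
#
#   URL: /analytics/search/?q=crash&happy=0
#   Params: q: crash happy: 0
#   Truncated 250 rows.        (only present when the result set was truncated)
#   <header row: export keys>
#   <one row per opinion, up to MAX_OPINIONS>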
@check_new_user
@analyzer_required
@es_required_or_50x(error_template='analytics/es_down.html')
@es_error_statsd
def analytics_search(request):
template = 'analytics/analyzer/search.html'
output_format = request.GET.get('format', None)
page = smart_int(request.GET.get('page', 1), 1)
# Note: If we add additional querystring fields, we need to add
# them to generate_dashboard_url.
search_happy = request.GET.get('happy', None)
search_has_email = request.GET.get('has_email', None)
search_platform = request.GET.get('platform', None)
search_locale = request.GET.get('locale', None)
search_country = request.GET.get('country', None)
search_product = request.GET.get('product', None)
search_domain = request.GET.get('domain', None)
search_api = smart_int(request.GET.get('api', None), fallback=None)
search_version = request.GET.get('version', None)
search_query = request.GET.get('q', None)
search_date_start = smart_date(
request.GET.get('date_start', None), fallback=None)
search_date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
search_bigram = request.GET.get('bigram', None)
search_source = request.GET.get('source', None)
search_campaign = request.GET.get('campaign', None)
search_organic = request.GET.get('organic', None)
selected = request.GET.get('selected', None)
filter_data = []
current_search = {'page': page}
search = ResponseMappingType.search()
f = F()
# If search happy is '0' or '1', set it to False or True, respectively.
search_happy = {'0': False, '1': True}.get(search_happy, None)
if search_happy in [False, True]:
f &= F(happy=search_happy)
current_search['happy'] = int(search_happy)
# If search has_email is '0' or '1', set it to False or True,
# respectively.
search_has_email = {'0': False, '1': True}.get(search_has_email, None)
if search_has_email in [False, True]:
f &= F(has_email=search_has_email)
current_search['has_email'] = int(search_has_email)
def unknown_to_empty(text):
"""Convert "Unknown" to "" to support old links"""
return u'' if text.lower() == u'unknown' else text
if search_platform is not None:
f &= F(platform=unknown_to_empty(search_platform))
current_search['platform'] = search_platform
if search_locale is not None:
f &= F(locale=unknown_to_empty(search_locale))
current_search['locale'] = search_locale
if search_product is not None:
f &= F(product=unknown_to_empty(search_product))
current_search['product'] = search_product
# Only show the version if there's a product.
if search_version is not None:
# Note: We only filter on version if we're filtering on
# product.
f &= F(version=unknown_to_empty(search_version))
current_search['version'] = search_version
# Only show the country if the product is Firefox OS.
if search_country is not None and search_product == 'Firefox OS':
f &= F(country=unknown_to_empty(search_country))
current_search['country'] = search_country
if search_domain is not None:
f &= F(url_domain=unknown_to_empty(search_domain))
current_search['domain'] = search_domain
if search_api is not None:
f &= F(api=search_api)
current_search['api'] = search_api
if search_date_start is None and search_date_end is None:
selected = '7d'
if search_date_end is None:
search_date_end = datetime.now()
if search_date_start is None:
search_date_start = search_date_end - timedelta(days=7)
current_search['date_end'] = search_date_end.strftime('%Y-%m-%d')
# Add one day, so that the search range includes the entire day.
end = search_date_end + timedelta(days=1)
# Note 'less than', not 'less than or equal', because of the added
# day above.
f &= F(created__lt=end)
current_search['date_start'] = search_date_start.strftime('%Y-%m-%d')
f &= F(created__gte=search_date_start)
if search_query:
current_search['q'] = search_query
search = search.query(description__sqs=search_query)
if search_bigram is not None:
f &= F(description_bigrams=search_bigram)
filter_data.append({
'display': 'Bigram',
'name': 'bigram',
'options': [{
'count': 'all',
'name': search_bigram,
'display': search_bigram,
'value': search_bigram,
'checked': True
}]
})
if search_source is not None:
f &= F(source=search_source)
current_search['source'] = search_source
if search_campaign is not None:
f &= F(campaign=search_campaign)
current_search['campaign'] = search_campaign
search_organic = {'0': False, '1': True}.get(search_organic, None)
if search_organic in [False, True]:
f &= F(organic=search_organic)
current_search['organic'] = int(search_organic)
search = search.filter(f).order_by('-created')
# If they're asking for a CSV export, then send them to the export
# screen.
if output_format == 'csv':
return _analytics_search_export(request, search)
# Search results and pagination
if page < 1:
page = 1
page_count = 50
start = page_count * (page - 1)
end = start + page_count
search_count = search.count()
search_results = search.values_list('id')[start:end]
opinion_page_ids = [mem[0][0] for mem in search_results]
# We convert what we get back from ES to what's in the db so we
# can get all the information.
opinion_page = Response.objects.filter(id__in=opinion_page_ids)
# Navigation facet data
# This loop does two things. First it maps 'T' -> True and 'F' ->
# False. This is probably something EU should be doing for
# us. Second, it restructures the data into a more convenient
# form.
counts = {
'happy': {},
'has_email': {},
'platform': {},
'locale': {},
'country': {},
'product': {},
'version': {},
'url_domain': {},
'api': {},
'source': {},
'campaign': {},
'organic': {},
}
facets = search.facet(*(counts.keys()),
size=1000,
filtered=bool(search._process_filters(f.filters)))
for param, terms in facets.facet_counts().items():
for term in terms:
name = term['term']
if name == 'T':
name = True
elif name == 'F':
name = False
counts[param][name] = term['count']
def empty_to_unknown(text):
return 'Unknown' if text == u'' else text
filter_data.extend([
counts_to_options(
counts['happy'].items(),
name='happy',
display='Sentiment',
display_map={True: 'Happy', False: 'Sad'},
value_map={True: 1, False: 0},
checked=search_happy),
counts_to_options(
counts['has_email'].items(),
name='has_email',
display='Has email',
display_map={True: 'Yes', False: 'No'},
value_map={True: 1, False: 0},
checked=search_has_email),
counts_to_options(
counts['product'].items(),
name='product',
display='Product',
display_map=empty_to_unknown,
checked=search_product)
])
# Only show the version if we're showing a specific
# product.
if search_product:
filter_data.append(
counts_to_options(
counts['version'].items(),
name='version',
display='Version',
display_map=empty_to_unknown,
checked=search_version)
)
# Only show the country if the product is Firefox OS.
if search_product == 'Firefox OS':
filter_data.append(
counts_to_options(
counts['country'].items(),
name='country',
display='Country',
checked=search_country,
display_map=country_name),
)
filter_data.extend(
[
counts_to_options(
counts['platform'].items(),
name='platform',
display='Platform',
display_map=empty_to_unknown,
checked=search_platform),
counts_to_options(
counts['locale'].items(),
name='locale',
display='Locale',
checked=search_locale,
display_map=locale_name),
counts_to_options(
counts['url_domain'].items(),
name='domain',
display='Domain',
checked=search_domain,
display_map=empty_to_unknown),
counts_to_options(
counts['api'].items(),
name='api',
display='API version',
checked=search_api,
display_map=empty_to_unknown),
counts_to_options(
counts['organic'].items(),
name='organic',
display='Organic',
display_map={True: 'Yes', False: 'No'},
value_map={True: 1, False: 0},
checked=search_organic),
counts_to_options(
counts['source'].items(),
name='source',
display='Source',
checked=search_source,
display_map=empty_to_unknown),
counts_to_options(
counts['campaign'].items(),
name='campaign',
display='Campaign',
checked=search_campaign,
display_map=empty_to_unknown),
]
)
return render(request, template, {
'opinions': opinion_page,
'opinion_count': search_count,
'filter_data': filter_data,
'page': page,
'prev_page': page - 1 if start > 0 else None,
'next_page': page + 1 if end < search_count else None,
'current_search': current_search,
'selected': selected,
})
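# Illustrative request: a URL such as
#   /analytics/search/?happy=0&product=Firefox&date_start=2014-01-01&format=csv
# filters to sad responses for the "Firefox" product since 2014-01-01 and
# returns the CSV export instead of the HTML page. (Path and values are
# examples only; the view reads whatever querystring it receives.)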
@check_new_user
@analyzer_required
@es_required_or_50x(error_template='analytics/es_down.html')
@es_error_statsd
def analytics_hourly_histogram(request):
"""Shows an hourly histogram for the last 5 days of all responses"""
template = 'analytics/analyzer/hourly_histogram.html'
date_end = smart_date(
request.GET.get('date_end', None), fallback=None)
if date_end is None:
date_end = date.today()
date_start = date_end - timedelta(days=5)
search = ResponseMappingType.search()
filters = F(created__gte=date_start, created__lte=date_end)
search = search.filter(filters)
hourly_histogram = search.facet_raw(
hourly={
'date_histogram': {'interval': 'hour', 'field': 'created'},
'facet_filter': search._process_filters(filters.filters)
}).facet_counts()
hourly_data = dict((p['time'], p['count'])
for p in hourly_histogram['hourly'])
hour = 60 * 60 * 1000.0
zero_fill(date_start, date_end, [hourly_data], spacing=hour)
# FIXME: This is goofy. After zero_fill, we end up with a bunch of
# trailing zeros for reasons I don't really understand, so instead
# of fixing that, I'm just going to remove them here.
hourly_data = sorted(hourly_data.items())
while hourly_data and hourly_data[-1][1] == 0:
hourly_data.pop(-1)
histogram = [
{'label': 'Hourly', 'name': 'hourly',
'data': hourly_data},
]
return render(request, template, {
'histogram': histogram,
'start_date': date_start,
'end_date': date_end
})
class ProductsUpdateView(FormView):
"""An administrator view for showing, adding, and updating the products."""
template_name = 'analytics/analyzer/addproducts.html'
form_class = ProductsUpdateForm
success_url = 'products'
@method_decorator(check_new_user)
@method_decorator(analyzer_required)
def dispatch(self, *args, **kwargs):
return super(ProductsUpdateView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
if 'pk' in request.GET:
self.object = get_object_or_404(Product, pk=request.GET['pk'])
return super(ProductsUpdateView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProductsUpdateView, self).get_context_data(**kwargs)
context['products'] = Product.objects.all()
return context
def get_form_kwargs(self):
kwargs = super(ProductsUpdateView, self).get_form_kwargs()
if hasattr(self, 'object'):
kwargs['instance'] = self.object
return kwargs
def form_valid(self, form):
try:
instance = Product.objects.get(db_name=form.data.get('db_name'))
instance.slug = form.data.get('slug') or instance.slug
instance.display_name = (form.data.get('display_name') or
instance.display_name)
instance.notes = form.data.get('notes') or instance.notes
instance.enabled = form.data.get('enabled') or False
instance.on_dashboard = form.data.get('on_dashboard') or False
instance.on_picker = form.data.get('on_picker') or False
instance.browser = form.data.get('browser') or u''
instance.browser_data_browser = form.data.get('browser_data_browser') or u''
self.object = instance.save()
except Product.DoesNotExist:
self.object = form.save()
return super(ProductsUpdateView, self).form_valid(form)
|
|
import ctypes
from ctypes import (POINTER, c_char_p, c_size_t, c_int, c_long, c_ulong,
c_double, c_void_p)
from ctypes.util import find_library
class _c_gsl_rng_type(ctypes.Structure):
_fields_ = [('name', c_char_p),
('max', c_long),
('min', c_size_t),
('__set', c_void_p),
('__get', c_void_p),
('__get_double', c_void_p),
]
_c_gsl_rng_type_p = POINTER(_c_gsl_rng_type)
class _c_gsl_rng(ctypes.Structure):
_fields_ = [('type', _c_gsl_rng_type_p),
('state', c_void_p)]
_c_gsl_rng_p = POINTER(_c_gsl_rng)
class _GSLFuncLoader(object):
# see: http://code.activestate.com/recipes/576549-gsl-with-python3/
gslcblas = ctypes.CDLL(find_library('gslcblas'), mode=ctypes.RTLD_GLOBAL)
gsl = ctypes.CDLL(find_library('gsl'))
def _load_1(self, name, argtypes=None, restype=None):
func = getattr(self.gsl, name)
if argtypes is not None:
func.argtypes = argtypes
if restype is not None:
func.restype = restype
setattr(self, name, func)
return func
def _load(self, name, argtypes=None, restype=None):
if isinstance(name, str):
return self._load_1(name, argtypes, restype)
else:
try:
return [self._load_1(n, argtypes, restype) for n in name]
except TypeError:
raise ValueError('name=%r should be a string or an iterable '
'of strings' % name)
func = _GSLFuncLoader()
func._load('gsl_strerror', [c_int], c_char_p)
func._load('gsl_rng_alloc', [_c_gsl_rng_type_p], _c_gsl_rng_p)
func._load('gsl_rng_set', [_c_gsl_rng_p, c_ulong])
func._load('gsl_rng_free', [_c_gsl_rng_p])
func._load('gsl_rng_types_setup',
restype=c_void_p) # POINTER(_c_gsl_rng_p)
func._load('gsl_rng_state', [_c_gsl_rng_p], c_void_p)
func._load('gsl_rng_size', [_c_gsl_rng_p], c_size_t)
func._load(['gsl_ran_gaussian',
'gsl_ran_gaussian_ziggurat',
'gsl_ran_gaussian_ratio_method'],
[_c_gsl_rng_p, c_double],
c_double)
gsl_strerror = func.gsl_strerror
def _get_gsl_rng_type_p_dict():
"""
Return a dict mapping each ``gsl_rng_type`` name to a pointer to that type
object. This is equivalent to the C code below, taken from the GSL
documentation:
.. sourcecode:: c
const gsl_rng_type **t, **t0;
t0 = gsl_rng_types_setup ();
for (t = t0; *t != 0; t++)
{
printf ("%s\n", (*t)->name); /* instead, store t to dict */
}
"""
t = func.gsl_rng_types_setup()
dt = ctypes.sizeof(c_void_p)
dct = {}
while True:
a = c_void_p.from_address(t)
if a.value is None:
break
name = c_char_p.from_address(a.value).value
name = name.decode() # for Python 3 (bytes to str)
dct[name] = ctypes.cast(a, _c_gsl_rng_type_p)
t += dt
return dct
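# The keys of the returned dict are GSL generator names such as 'mt19937',
# 'taus' or 'ranlxs0'; the exact set depends on the GSL version installed.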
class gsl_rng(object):
_gsl_rng_alloc = func.gsl_rng_alloc
_gsl_rng_set = func.gsl_rng_set
_gsl_rng_free = func.gsl_rng_free
_gsl_rng_type_p_dict = _get_gsl_rng_type_p_dict()
_ctype_ = _c_gsl_rng_p # for railgun
def __init__(self, seed=None, name='mt19937'):
self._gsl_rng_name = name
self._gsl_rng_type_p = self._gsl_rng_type_p_dict[name]
self._cdata_ = self._gsl_rng_alloc(self._gsl_rng_type_p)
# the name '_cdata_' is for railgun
if seed is not None:
self.set(seed)
def __setstate__(self, data):
(attrs, state) = data
self.__init__(name=attrs.pop('_gsl_rng_name'))
self.__dict__.update(attrs)
self.set_state(state)
def __getstate__(self):
attrs = self.__dict__.copy()
del attrs['_gsl_rng_type_p']
del attrs['_cdata_']
return (attrs, self.get_state())
def __copy__(self):
clone = self.__class__.__new__(self.__class__)
clone.__dict__.update(self.__dict__)
return clone
def __del__(self):
self._gsl_rng_free(self._cdata_)
def set(self, seed):
self._gsl_rng_set(self._cdata_, seed)
_gsl_ran_gaussian = {
'': func.gsl_ran_gaussian,
'ziggurat': func.gsl_ran_gaussian_ziggurat,
'ratio_method': func.gsl_ran_gaussian_ratio_method,
}
def ran_gaussian(self, sigma=1.0, method=''):
return self._gsl_ran_gaussian[method](self._cdata_, sigma)
def get_state(self):
"""
Return state of the random number generator as a byte string.
"""
ptr = func.gsl_rng_state(self._cdata_)
size = func.gsl_rng_size(self._cdata_)
buf = ctypes.create_string_buffer(size)
ctypes.memmove(buf, ptr, size)
return buf.raw
def set_state(self, state):
"""
Set state returned by :meth:`get_state`.
"""
ptr = func.gsl_rng_state(self._cdata_)
size = func.gsl_rng_size(self._cdata_)
given_size = len(state)
# Pass size explicitly, otherwise it will create a buffer with
# extra NULL terminator in it:
buf = ctypes.create_string_buffer(state, given_size)
if given_size != size:
raise ValueError(
'Trying to set incompatible length of state. '
'Size of the given state is {0} while {1} is required. '
.format(given_size, size))
ctypes.memmove(ptr, buf, size)
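# A small usage sketch (assumes a GSL shared library can be found by
# find_library at import time):
#
#   rng = gsl_rng(seed=42)                    # 'mt19937' generator by default
#   x = rng.ran_gaussian(sigma=2.0)           # Gaussian variate, sigma = 2.0
#   y = rng.ran_gaussian(method='ziggurat')   # alternative sampling method
#   state = rng.get_state()                   # snapshot the generator state
#   rng.set_state(state)                      # ...and restore it later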
def plot_gaussian(method='', sigma=1, show=True):
import pylab
rng = gsl_rng()
pylab.hist(
[rng.ran_gaussian(method=method, sigma=sigma) for i in range(10000)],
bins=100, normed=True)
if show:
pylab.show()
def print_error_codes():
for i in range(1000):
error_message = gsl_strerror(i)
if isinstance(error_message, bytes):
error_message = error_message.decode()  # bytes -> str on Python 3
if error_message != 'unknown error code':
print('% 4d: "%s"' % (i, error_message))
def main():
import sys
cmd2func = dict(
print_error_codes=print_error_codes,
plot_gaussian=plot_gaussian,
)
if len(sys.argv) < 2:
print('Please specify command or code to execute\n')
for name in sorted(cmd2func):
print(name)
else:
(cmd,) = sys.argv[1:]
if cmd in cmd2func:
print("Calling function: %s" % cmd)
cmd2func[cmd]()
else:
print("Executing code: %s" % cmd)
ret = eval(cmd, globals())
if ret is not None:
print("Returned %r" % ret)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import os
import unittest
import mock
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
import nltk
from textblob.tokenizers import WordTokenizer
from textblob.classifiers import (NaiveBayesClassifier, DecisionTreeClassifier,
basic_extractor, contains_extractor, NLTKClassifier,
PositiveNaiveBayesClassifier, _get_words_from_dataset,
MaxEntClassifier)
from textblob import formats
from textblob.compat import unicode
from textblob.exceptions import FormatError
HERE = os.path.abspath(os.path.dirname(__file__))
CSV_FILE = os.path.join(HERE, 'data.csv')
JSON_FILE = os.path.join(HERE, "data.json")
TSV_FILE = os.path.join(HERE, "data.tsv")
train_set = [
('I love this car', 'positive'),
('This view is amazing', 'positive'),
('I feel great this morning', 'positive'),
('I am so excited about the concert', 'positive'),
('He is my best friend', 'positive'),
('I do not like this car', 'negative'),
('This view is horrible', 'negative'),
('I feel tired this morning', 'negative'),
('I am not looking forward to the concert', 'negative'),
('He is my enemy', 'negative')
]
test_set = [('I feel happy this morning', 'positive'),
('Larry is my friend.', 'positive'),
('I do not like that man.', 'negative'),
('My house is not great.', 'negative'),
('Your song is annoying.', 'negative')]
class BadNLTKClassifier(NLTKClassifier):
'''An NLTK classifier without ``nltk_class`` defined. Oops!'''
pass
class TestNLTKClassifier(unittest.TestCase):
def setUp(self):
self.bad_classifier = BadNLTKClassifier(train_set)
def test_raises_value_error_without_nltk_class(self):
assert_raises(ValueError,
lambda: self.bad_classifier.classifier)
assert_raises(ValueError,
lambda: self.bad_classifier.train(train_set))
assert_raises(ValueError,
lambda: self.bad_classifier.update([("This is no good.", 'negative')]))
class TestNaiveBayesClassifier(unittest.TestCase):
def setUp(self):
self.classifier = NaiveBayesClassifier(train_set)
def test_default_extractor(self):
text = "I feel happy this morning."
assert_equal(self.classifier.extract_features(text), basic_extractor(text, train_set))
def test_classify(self):
res = self.classifier.classify("I feel happy this morning")
assert_equal(res, 'positive')
assert_equal(len(self.classifier.train_set), len(train_set))
def test_classify_a_list_of_words(self):
res = self.classifier.classify(["I", "feel", "happy", "this", "morning"])
assert_equal(res, "positive")
def test_train_from_lists_of_words(self):
# classifier can be trained on lists of words instead of strings
train = [(doc.split(), label) for doc, label in train_set]
classifier = NaiveBayesClassifier(train)
assert_equal(classifier.accuracy(test_set),
self.classifier.accuracy(test_set))
def test_prob_classify(self):
res = self.classifier.prob_classify("I feel happy this morning")
assert_equal(res.max(), "positive")
assert_true(res.prob("positive") > res.prob("negative"))
def test_accuracy(self):
acc = self.classifier.accuracy(test_set)
assert_true(isinstance(acc, float))
def test_update(self):
res1 = self.classifier.prob_classify("lorem ipsum")
original_length = len(self.classifier.train_set)
self.classifier.update([("lorem ipsum", "positive")])
new_length = len(self.classifier.train_set)
res2 = self.classifier.prob_classify("lorem ipsum")
assert_true(res2.prob("positive") > res1.prob("positive"))
assert_equal(original_length + 1, new_length)
def test_labels(self):
labels = self.classifier.labels()
assert_true("positive" in labels)
assert_true("negative" in labels)
def test_show_informative_features(self):
feats = self.classifier.show_informative_features()
def test_informative_features(self):
feats = self.classifier.informative_features(3)
assert_true(isinstance(feats, list))
assert_true(isinstance(feats[0], tuple))
def test_custom_feature_extractor(self):
cl = NaiveBayesClassifier(train_set, custom_extractor)
cl.classify("Yay! I'm so happy it works.")
assert_equal(cl.train_features[0][1], 'positive')
def test_init_with_csv_file(self):
with open(CSV_FILE) as fp:
cl = NaiveBayesClassifier(fp, format="csv")
assert_equal(cl.classify("I feel happy this morning"), 'pos')
training_sentence = cl.train_set[0][0]
assert_true(isinstance(training_sentence, unicode))
def test_init_with_csv_file_without_format_specifier(self):
with open(CSV_FILE) as fp:
cl = NaiveBayesClassifier(fp)
assert_equal(cl.classify("I feel happy this morning"), 'pos')
training_sentence = cl.train_set[0][0]
assert_true(isinstance(training_sentence, unicode))
def test_init_with_json_file(self):
with open(JSON_FILE) as fp:
cl = NaiveBayesClassifier(fp, format="json")
assert_equal(cl.classify("I feel happy this morning"), 'pos')
training_sentence = cl.train_set[0][0]
assert_true(isinstance(training_sentence, unicode))
def test_init_with_json_file_without_format_specifier(self):
with open(JSON_FILE) as fp:
cl = NaiveBayesClassifier(fp)
assert_equal(cl.classify("I feel happy this morning"), 'pos')
training_sentence = cl.train_set[0][0]
assert_true(isinstance(training_sentence, unicode))
def test_init_with_custom_format(self):
redis_train = [('I like turtles', 'pos'), ('I hate turtles', 'neg')]
class MockRedisFormat(formats.BaseFormat):
def __init__(self, client, port):
self.client = client
self.port = port
@classmethod
def detect(cls, stream):
return True
def to_iterable(self):
return redis_train
formats.register('redis', MockRedisFormat)
mock_redis = mock.Mock()
cl = NaiveBayesClassifier(mock_redis, format='redis', port=1234)
assert_equal(cl.train_set, redis_train)
def test_data_with_no_available_format(self):
mock_fp = mock.Mock()
mock_fp.read.return_value = ''
assert_raises(FormatError, lambda: NaiveBayesClassifier(mock_fp))
def test_accuracy_on_a_csv_file(self):
with open(CSV_FILE) as fp:
a = self.classifier.accuracy(fp)
assert_equal(type(a), float)
def test_accuracy_on_json_file(self):
with open(JSON_FILE) as fp:
a = self.classifier.accuracy(fp)
assert_equal(type(a), float)
def test_init_with_tsv_file(self):
with open(TSV_FILE) as fp:
cl = NaiveBayesClassifier(fp)
assert_equal(cl.classify("I feel happy this morning"), 'pos')
training_sentence = cl.train_set[0][0]
assert_true(isinstance(training_sentence, unicode))
def test_init_with_bad_format_specifier(self):
assert_raises(ValueError,
lambda: NaiveBayesClassifier(CSV_FILE, format='unknown'))
def test_repr(self):
assert_equal(repr(self.classifier),
"<NaiveBayesClassifier trained on {0} instances>".format(len(train_set)))
class TestDecisionTreeClassifier(unittest.TestCase):
def setUp(self):
self.classifier = DecisionTreeClassifier(train_set)
def test_classify(self):
res = self.classifier.classify("I feel happy this morning")
assert_equal(res, 'positive')
assert_equal(len(self.classifier.train_set), len(train_set))
def test_accuracy(self):
acc = self.classifier.accuracy(test_set)
assert_true(isinstance(acc, float))
def test_update(self):
original_length = len(self.classifier.train_set)
self.classifier.update([("lorem ipsum", "positive")])
new_length = len(self.classifier.train_set)
assert_equal(original_length + 1, new_length)
def test_custom_feature_extractor(self):
cl = DecisionTreeClassifier(train_set, custom_extractor)
cl.classify("Yay! I'm so happy it works.")
assert_equal(cl.train_features[0][1], 'positive')
def test_pseudocode(self):
code = self.classifier.pseudocode()
assert_true("if" in code)
def test_pretty_format(self):
pp = self.classifier.pprint(width=60)
pf = self.classifier.pretty_format(width=60)
assert_true(isinstance(pp, unicode))
assert_equal(pp, pf)
def test_repr(self):
assert_equal(repr(self.classifier),
"<DecisionTreeClassifier trained on {0} instances>".format(len(train_set)))
@attr('requires_numpy')
@attr('slow')
class TestMaxEntClassifier(unittest.TestCase):
def setUp(self):
self.classifier = MaxEntClassifier(train_set)
def test_classify(self):
res = self.classifier.classify("I feel happy this morning")
assert_equal(res, 'positive')
assert_equal(len(self.classifier.train_set), len(train_set))
def test_prob_classify(self):
res = self.classifier.prob_classify("I feel happy this morning")
assert_equal(res.max(), 'positive')
assert_true(res.prob("positive") > res.prob("negative"))
class TestPositiveNaiveBayesClassifier(unittest.TestCase):
def setUp(self):
sports_sentences = ['The team dominated the game',
'They lost the ball',
'The game was intense',
'The goalkeeper catched the ball',
'The other team controlled the ball'
'The ball went off the court',
'They had the ball for the whole game']
various_sentences = ['The President did not comment',
'I lost the keys',
'The team won the game',
'Sara has two kids',
'The show is over',
'The cat ate the mouse.']
self.classifier = PositiveNaiveBayesClassifier(positive_set=sports_sentences,
unlabeled_set=various_sentences)
def test_classifier(self):
assert_true(isinstance(self.classifier.classifier,
nltk.classify.PositiveNaiveBayesClassifier))
def test_classify(self):
assert_true(self.classifier.classify("My team lost the game."))
assert_false(self.classifier.classify("The cat is on the table."))
def test_update(self):
orig_pos_length = len(self.classifier.positive_set)
orig_unlabeled_length = len(self.classifier.unlabeled_set)
self.classifier.update(new_positive_data=['He threw the ball to the base.'],
new_unlabeled_data=["I passed a tree today."])
new_pos_length = len(self.classifier.positive_set)
new_unlabeled_length = len(self.classifier.unlabeled_set)
assert_equal(new_pos_length, orig_pos_length + 1)
assert_equal(new_unlabeled_length, orig_unlabeled_length + 1)
def test_accuracy(self):
test_set = [
("My team lost the game", True),
("The ball was in the court.", True),
("We should have won the game.", True),
("And now for something completely different", False),
("I can't believe it's not butter.", False)
]
accuracy = self.classifier.accuracy(test_set)
assert_true(isinstance(accuracy, float))
def test_repr(self):
assert_equal(repr(self.classifier),
"<PositiveNaiveBayesClassifier trained on {0} labeled and {1} unlabeled instances>"
.format(len(self.classifier.positive_set),
len(self.classifier.unlabeled_set))
)
def test_basic_extractor():
text = "I feel happy this morning."
feats = basic_extractor(text, train_set)
assert_true(feats["contains(feel)"])
assert_true(feats['contains(morning)'])
assert_false(feats["contains(amazing)"])
def test_basic_extractor_with_list():
text = "I feel happy this morning.".split()
feats = basic_extractor(text, train_set)
assert_true(feats["contains(feel)"])
assert_true(feats['contains(morning)'])
assert_false(feats["contains(amazing)"])
def test_contains_extractor_with_string():
text = "Simple is better than complex"
features = contains_extractor(text)
assert_true(features["contains(Simple)"])
assert_false(features.get('contains(simple)', False))
assert_true(features['contains(complex)'])
assert_false(features.get("contains(derp)", False))
def test_contains_extractor_with_list():
text = ["Simple", "is", "better", "than", "complex"]
features = contains_extractor(text)
assert_true(features['contains(Simple)'])
assert_false(features.get("contains(simple)", False))
assert_true(features['contains(complex)'])
assert_false(features.get("contains(derp)", False))
def custom_extractor(document):
feats = {}
tokens = document.split()
for tok in tokens:
feat_name = "last_letter({0})".format(tok[-1])
feats[feat_name] = True
return feats
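# For example, custom_extractor("I love this car") returns
# {'last_letter(I)': True, 'last_letter(e)': True,
#  'last_letter(s)': True, 'last_letter(r)': True} --
# one boolean feature per final letter of each token.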
def test_get_words_from_dataset():
tok = WordTokenizer()
all_words = []
for words, _ in train_set:
all_words.extend(tok.itokenize(words, include_punc=False))
assert_equal(_get_words_from_dataset(train_set), set(all_words))
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from Benchmarks import *
import CpuConfig
import MemConfig
def _listCpuTypes(option, opt, value, parser):
CpuConfig.print_cpu_list()
sys.exit(0)
def _listMemTypes(option, opt, value, parser):
MemConfig.print_mem_list()
sys.exit(0)
def addCommonOptions(parser):
# system options
parser.add_option("--list-cpu-types",
action="callback", callback=_listCpuTypes,
help="List available CPU types")
parser.add_option("--cpu-type", type="choice", default="atomic",
choices=CpuConfig.cpu_names(),
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--sys-voltage", action="store", type="string",
default='1.0V',
help = """Top-level voltage for blocks running at system
power supply""")
parser.add_option("--sys-clock", action="store", type="string",
default='1GHz',
help = """Top-level clock for blocks running at system
speed""")
parser.add_option("--cpu-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at CPU speed")
parser.add_option("--smt", action="store_true", default=False,
help = """
Only used if multiple programs are specified. If true,
then the number of threads per cpu is the same as the
number of programs.""")
# Memory Options
parser.add_option("--list-mem-types",
action="callback", callback=_listMemTypes,
help="List available memory types")
parser.add_option("--mem-type", type="choice", default="ddr3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-channels", type="int", default=1,
help = "number of memory channels")
parser.add_option("--mem-size", action="store", type="string",
default="512MB",
help="Specify the physical memory size (single memory)")
parser.add_option("-l", "--lpae", action="store_true")
parser.add_option("-V", "--virtualisation", action="store_true")
# Cache Options
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
# Enable Ruby
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
metavar="TICKS", help="Run to absolute simulated tick " \
"specified including ticks from a restored checkpoint")
parser.add_option("--rel-max-tick", type="int", default=None,
metavar="TICKS", help="Simulate for specified number of" \
" ticks relative to the simulation start tick (e.g. if " \
"restoring a checkpoint)")
parser.add_option("--maxtime", type="float", default=None,
help="Run to the specified absolute simulated time in " \
"seconds")
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--num-work-ids", action="store", type="int",
help="Number of distinct work item types")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
# Simpoint options
parser.add_option("--simpoint-profile", action="store_true",
help="Enable basic block profiling for SimPoints")
parser.add_option("--simpoint-interval", type="int", default=10000000,
help="SimPoint interval in num of instructions")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> take checkpoints at tick M and every N ticks thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices=CpuConfig.cpu_names(),
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="str",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
parser.add_option("--spec-input", default="ref", type="choice",
choices=["ref", "test", "train", "smred", "mdred",
"lgred"],
help="Input set size for SPEC CPU2000 benchmarks.")
parser.add_option("--arm-iset", default="arm", type="choice",
choices=["arm", "thumb", "aarch64"],
help="ARM instruction set.")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--machine-type", action="store", type="choice",
choices=ArmMachineType.map.keys(), default="RealView_PBX")
parser.add_option("--dtb-filename", action="store", type="string",
help="Specifies device tree blob file to use with device-tree-"\
"enabled kernels")
parser.add_option("--enable-context-switch-stats-dump", \
action="store_true", help="Enable stats dump at context "\
"switches and dump tasks file (required for Streamline)")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
|
|
import datetime
import tornado.gen
from bson.objectid import ObjectId
import motor
import two.execute
from two.playconn import PlayerConnection
import twcommon.misc
from twcommon.excepts import MessageException, ErrorMessageException
from twcommon.excepts import SymbolError, ExecRunawayException
from twcommon import wcproto
DIRTY_WORLD = 0x01 # World/instance data (creator, etc)
DIRTY_LOCALE = 0x02 # Main location description
DIRTY_FOCUS = 0x04 # Closeup view description
DIRTY_POPULACE = 0x08 # Who is in the location
DIRTY_TOOL = 0x10 # Toolpane "control panel" description
DIRTY_ALL = 0x1F # All of the above
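# Illustrative only: the flags are meant to be OR-ed together and tested bit
# by bit, e.g.
#     dirty = DIRTY_LOCALE | DIRTY_POPULACE    # 0x0A
#     if dirty & DIRTY_POPULACE:
#         pass  # refresh the "who is here" pane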
class LocContext(object):
"""
Pure-data class. Sometimes -- in fact, often -- you want to tote around
a bunch of location information in one object. This lets you do it.
All of the fields are optional except uid (and really, we may run into
some situation where uid is None also).
"""
def __init__(self, uid, wid=None, scid=None, iid=None, locid=None):
self.uid = uid
self.wid = wid
self.scid = scid
self.iid = iid
self.locid = locid
def __repr__(self):
ls = []
if self.uid:
ls.append( ('uid', self.uid) )
if self.wid:
ls.append( ('wid', self.wid) )
if self.scid:
ls.append( ('scid', self.scid) )
if self.iid:
ls.append( ('iid', self.iid) )
if self.locid:
ls.append( ('locid', self.locid) )
val = ' '.join([ ('%s=%s' % (key, val)) for (key, val) in ls ])
return '<LocContext %s>' % (val,)
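# Illustrative construction (variable names are made up); only uid is
# required, everything else defaults to None:
#     loctx = LocContext(uid, wid=world_id, iid=instance_id, locid=location_id)
#     print(loctx)   # <LocContext uid=... wid=... iid=... locid=...>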
class Task(object):
"""
Context for the execution of one command in the command queue. This
is used for both player and server commands. (For server commands,
connid is zero. If the command came from within tworld, twwcid is
also zero.)
The basic life cycle is handle(), resolve(), close().
"""
# Limit on how much work a task can do before we kill it.
# (The task is actually run is several phases; this is the limit
# per phase.)
CPU_TICK_LIMIT = 4000
# Limit on how deep the eval stack can get.
STACK_DEPTH_LIMIT = 12
def __init__(self, app, cmdobj, connid, twwcid, queuetime):
self.app = app
self.log = app.log
# The Namespace object that represents the command:
self.cmdobj = cmdobj
# Connection ID for player that sent this (or 0 if from a server):
self.connid = connid
# Connection ID for tweb that invoked this (or 0 if it came from
# tworld itself):
self.twwcid = twwcid
# When this command was received by tworld:
self.queuetime = queuetime
# When we started working on the command:
self.starttime = twcommon.misc.now()
# Hard limit on how much script code we'll execute for this task.
self.cputicks = 0
# Total of cputicks over all the phases of the task.
self.totalcputicks = 0
# Maximum cputicks for a phase.
self.maxcputicks = 0
# Maps uids to LocContexts.
#self.loctxmap = {}
# This will be a set of change keys.
self.changeset = None
# This will map connection IDs to a bitmask of dirty bits.
# Values in this map should always be nonzero; if a connection
# is non-dirty, it should not be in the map.
self.updateconns = None
def close(self):
"""Clean up any large member variables. This probably reduces
ref cycles, or, if not, keeps my brain tidy.
"""
self.app = None
self.log = None
self.cmdobj = None
#self.loctxmap = None
self.updateconns = None
self.changeset = None
def tick(self, val=1):
self.cputicks = self.cputicks + val
if (self.cputicks > self.CPU_TICK_LIMIT):
self.log.error('ExecRunawayException: User script exceeded tick limit!')
raise ExecRunawayException('Script ran too long; aborting!')
def resetticks(self):
self.totalcputicks = self.totalcputicks + self.cputicks
self.maxcputicks = max(self.maxcputicks, self.cputicks)
self.cputicks = 0
def is_writable(self):
return (self.updateconns is not None)
def set_writable(self):
self.changeset = set()
self.updateconns = {}
def set_data_change(self, key):
assert self.is_writable(), 'set_data_change: Task was never set writable'
self.changeset.add(key)
def set_data_changes(self, keylist):
assert self.is_writable(), 'set_data_changes: Task was never set writable'
self.changeset.update(keylist)
def set_dirty(self, ls, dirty):
# ls may be a PlayerConnection, a uid (an ObjectId), or a list
# of either. Or None.
# dirty is one or more DIRTY flags.
assert self.is_writable(), 'set_dirty: Task was never set writable'
if ls is None:
return
if type(ls) not in (tuple, list):
ls = ( ls, )
for obj in ls:
if isinstance(obj, PlayerConnection):
val = self.updateconns.get(obj.connid, 0) | dirty
self.updateconns[obj.connid] = val
elif isinstance(obj, ObjectId):
subls = self.app.playconns.get_for_uid(obj)
if subls:
for conn in subls:
val = self.updateconns.get(conn.connid, 0) | dirty
self.updateconns[conn.connid] = val
else:
self.log.warning('set_dirty: unrecognized %s', obj)
def write_event(self, ls, text):
# ls may be a PlayerConnection, a uid (an ObjectId), or a list
# of either. Or None.
if ls is None:
return
if type(ls) not in (tuple, list):
ls = ( ls, )
for obj in ls:
if isinstance(obj, PlayerConnection):
obj.write({'cmd':'event', 'text':text})
elif isinstance(obj, ObjectId):
subls = self.app.playconns.get_for_uid(obj)
if subls:
for conn in subls:
conn.write({'cmd':'event', 'text':text})
else:
self.log.warning('write_event: unrecognized %s', obj)
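# Illustrative calls (made-up text): notify a single connection, or every
# connection belonging to one or more player uids:
#     task.write_event(conn, 'You hear a distant bell.')
#     task.write_event([uid1, uid2], 'The ground shakes.')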
def clear_loctx(self, uid):
#if uid in self.loctxmap:
# del self.loctxmap[uid]
pass
@tornado.gen.coroutine
def get_loctx(self, uid):
#loctx = self.loctxmap.get(uid, None)
#if loctx:
# return loctx
playstate = yield motor.Op(self.app.mongodb.playstate.find_one,
{'_id':uid},
{'iid':1, 'locid':1, 'focus':1})
iid = playstate['iid']
if not iid:
loctx = LocContext(uid, None)
#self.loctxmap[uid] = loctx
return loctx
instance = yield motor.Op(self.app.mongodb.instances.find_one,
{'_id':iid})
loctx = LocContext(uid, instance['wid'], instance['scid'],
iid, playstate['locid'])
#self.loctxmap[uid] = loctx
return loctx
@tornado.gen.coroutine
def find_locale_players(self, uid=None, notself=False):
"""Generate a list of all players in the same location as a given
player. If no player is given, we presume the player that triggered
the current event. (Which means that for server events, you must
specify a uid or get None.)
If notself is true, the list excludes the given player.
"""
if uid is None:
conn = self.app.playconns.get(self.connid)
if not conn:
return None
uid = conn.uid
playstate = yield motor.Op(self.app.mongodb.playstate.find_one,
{'_id':uid},
{'iid':1, 'locid':1})
if not playstate:
return None
iid = playstate['iid']
if not iid:
return None
locid = playstate['locid']
if not locid:
return None
cursor = self.app.mongodb.playstate.find({'iid':iid, 'locid':locid},
{'_id':1})
people = []
while (yield cursor.fetch_next):
ostate = cursor.next_object()
if notself and ostate['_id'] == uid:
continue
people.append(ostate['_id'])
# cursor autoclose
return people
@tornado.gen.coroutine
def find_location_players(self, iid, locid):
"""Generates a list of players in a given location. If locid
is None, generates a list of players in the entire instance.
"""
if locid:
cursor = self.app.mongodb.playstate.find({'iid':iid, 'locid':locid},
{'_id':1})
else:
cursor = self.app.mongodb.playstate.find({'iid':iid},
{'_id':1})
people = []
while (yield cursor.fetch_next):
ostate = cursor.next_object()
people.append(ostate['_id'])
# cursor autoclose
return people
@tornado.gen.coroutine
def handle(self):
"""
Carry out a command. (Usually from a player, but sometimes generated
by the server itself.) 99% of tworld's work happens here.
Any exception raised by this function is considered serious, and
throws a full stack trace into the logs.
"""
self.log.debug('Handling message "%s": %s', self.cmdobj.cmd, str(self.cmdobj)[:64])
if self.app.shuttingdown:
raise Exception('The server is shutting down.')
cmdname = self.cmdobj.cmd
connid = self.connid
twwcid = self.twwcid
if connid == 0:
# A message not from any player!
if twwcid == 0:
# Internal message, from tworld itself.
stream = None
else:
# This is from tweb, not relayed from a player.
# (This is the rare case where we use twwcid; we have no
# other path back.)
stream = self.app.webconns.get(twwcid)
try:
if twwcid and not stream:
raise ErrorMessageException('Server message from completely unrecognized stream.')
cmd = self.app.all_commands.get(cmdname, None)
if not cmd:
raise ErrorMessageException('Unknown server command: "%s"' % (cmdname,))
if not cmd.isserver:
raise ErrorMessageException('Command must be invoked by a player: "%s"' % (cmdname,))
if not cmd.noneedmongo and not self.app.mongodb:
# Guess the database access is not going to work.
raise ErrorMessageException('Tworld has lost contact with the database.')
if cmd.doeswrite:
# May cause display changes.
self.set_writable()
res = yield cmd.func(self.app, self, self.cmdobj, stream)
if res is not None:
self.log.info('Command "%s" result: %s', cmdname, res)
except ErrorMessageException as ex:
self.log.warning('Error message running "%s": %s', cmdname, str(ex))
except MessageException as ex:
# MessageException is usually not worth logging, but for
# a server command, there's nobody else listening.
self.log.info('Message running "%s": %s', cmdname, str(ex))
# End of connid==0 case.
return
conn = self.app.playconns.get(connid)
# Command from a player (via conn). A MessageException here passes
# an error back to the player.
try:
cmd = self.app.all_commands.get(cmdname, None)
if not cmd:
raise ErrorMessageException('Unknown player command: "%s"' % (cmdname,))
# Check various limitations on the command.
if cmd.isserver:
raise ErrorMessageException('Command may not be invoked by a player: "%s"' % (cmdname,))
if cmd.restrict == 'admin':
player = yield motor.Op(self.app.mongodb.players.find_one,
{'_id':conn.uid},
{'admin':1})
if not (player and player.get('admin', False)):
raise ErrorMessageException('Command may only be invoked by an administrator: "%s"' % (cmdname,))
if cmd.restrict == 'creator':
# Player must be the creator of the world he is in.
### And it must be an unstable version.
# (Or an admin, anywhere.)
player = yield motor.Op(self.app.mongodb.players.find_one,
{'_id':conn.uid},
{'admin':1, 'build':1})
if not player:
raise ErrorMessageException('Player not found!')
if (player.get('admin', False)):
# Admins always have creator rights.
pass
elif (not player.get('build', False)):
raise ErrorMessageException('Command requires build permission: "%s"' % (cmdname,))
else:
playstate = yield motor.Op(self.app.mongodb.playstate.find_one,
{'_id':conn.uid},
{'iid':1})
instance = yield motor.Op(self.app.mongodb.instances.find_one,
{'_id':playstate['iid']})
world = yield motor.Op(self.app.mongodb.worlds.find_one,
{'_id':instance['wid']})
if world.get('creator', None) != conn.uid:
raise ErrorMessageException('Command may only be invoked by this world\'s creator: "%s"' % (cmdname,))
if not conn:
# Newly-established connection. Only 'playeropen' will be
# accepted. (Another twwcid case; we'll have to sneak the
# stream in through the command object.)
# (It's also possible that the connection closed since we
# queued this, in which case we still reject.)
if not cmd.preconnection:
raise ErrorMessageException('Tworld has not yet registered this connection.')
assert cmd.name=='playeropen', 'Command not playeropen should have already been rejected'
stream = self.app.webconns.get(twwcid)
if not stream:
raise ErrorMessageException('Message from completely unrecognized stream')
self.cmdobj._connid = connid
self.cmdobj._stream = stream
if not cmd.noneedmongo and not self.app.mongodb:
# Guess the database access is not going to work.
raise ErrorMessageException('Tworld has lost contact with the database.')
if cmd.doeswrite:
# May cause display changes.
self.set_writable()
res = yield cmd.func(self.app, self, self.cmdobj, conn)
if res is not None:
self.log.info('Command "%s" result: %s', cmdname, res)
except ErrorMessageException as ex:
# An ErrorMessageException is worth logging and sending back
# to the player, but not splatting out a stack trace.
self.log.warning('Error message running "%s": %s', cmdname, str(ex))
try:
# This is slightly hairy, because various error paths can
# arrive here with no conn or no connid.
if conn:
conn.write({'cmd':'error', 'text':str(ex)})
else:
# connid may be zero or nonzero, really
stream = self.app.webconns.get(twwcid)
stream.write(wcproto.message(connid, {'cmd':'error', 'text':str(ex)}))
except Exception as ex:
pass
except MessageException as ex:
# A MessageException is not worth logging.
try:
# This is slightly hairy, because various error paths can
# arrive here with no conn or no connid.
if conn:
conn.write({'cmd':'message', 'text':str(ex)})
else:
# connid may be zero or nonzero, really
stream = self.app.webconns.get(twwcid)
stream.write(wcproto.message(connid, {'cmd':'message', 'text':str(ex)}))
except Exception as ex:
pass
@tornado.gen.coroutine
def resolve(self):
"""
Resolve all side effects caused by data changes during this command.
Some connections will have been marked dirty already, as the commands
executed. The data changeset will also implicitly set connections
dirty, based on their current dependencies. After working that all
out, we send an update to each connection that needs it.
We reset the tick count per connection, so that a crowded room doesn't
wipe out the task.
"""
if not self.is_writable():
return
# Detach the update map. From this point on, the task is nonwritable
# again!
updateconns = self.updateconns
changeset = self.changeset
self.updateconns = None
self.changeset = None
# If nobody needs updating, we're done.
if not (changeset or updateconns):
return
connections = self.app.playconns.all()
# Go through the data changes, setting dirty bits as needed.
# (But we try to do as little work as possible.)
if changeset:
#self.log.debug('Task changeset: %s', changeset)
for conn in connections:
dirty = updateconns.get(conn.connid, 0)
if not (dirty & DIRTY_LOCALE):
if not conn.localedependencies.isdisjoint(changeset):
dirty |= DIRTY_LOCALE
if not (dirty & DIRTY_POPULACE):
if not conn.populacedependencies.isdisjoint(changeset):
dirty |= DIRTY_POPULACE
if not (dirty & DIRTY_FOCUS):
if not conn.focusdependencies.isdisjoint(changeset):
dirty |= DIRTY_FOCUS
if not (dirty & DIRTY_TOOL):
if not conn.tooldependencies.isdisjoint(changeset):
dirty |= DIRTY_TOOL
if dirty:
updateconns[conn.connid] = dirty
# Again, we might be done.
if not updateconns:
return
# self.log.info('Must resolve updates: %s', updateconns)
# If two connections are on the same player, this won't be
# as efficient as it might be -- we'll generate text twice.
# But that's a rare case.
for (connid, dirty) in updateconns.items():
try:
self.resetticks()
conn = self.app.playconns.get(connid)
yield two.execute.generate_update(self, conn, dirty)
except Exception as ex:
self.log.error('Error updating while resolving task: %s', self.cmdobj, exc_info=True)
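# A rough sketch (not actual tworld dispatch code) of the handle/resolve/close
# life cycle described in the Task docstring, as a coroutine caller might
# drive it:
#
#     task = Task(app, cmdobj, connid, twwcid, twcommon.misc.now())
#     try:
#         yield task.handle()
#         yield task.resolve()
#     finally:
#         task.close()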
|
|
#
# blowfish.py
# Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
#
# This module is open source; you can redistribute it and/or
# modify it under the terms of the GPL or Artistic License.
# These licenses are available at http://www.opensource.org
#
# This software must be used and distributed in accordance
# with the law. The author claims no liability for its
# misuse.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# This software was modified by Ivan Voras: CTR cipher mode of
# operation was added, together with testing and example code.
# These changes are (c) 2007./08. Ivan Voras <ivoras@gmail.com>
# These changes can be used, modified and distributed under the
# GPL or Artistic License, the same as the original module.
# All disclaimers of warranty from the original module also
# apply to these changes.
# CBC mode contributed by Joel Edwards <joeledwards@gmail.com>,
# under the same license conditions.
"""
Blowfish Encryption
This module is a pure python implementation of Bruce Schneier's
encryption scheme 'Blowfish'. Blowfish is a 16-round Feistel network
cipher and offers substantial speed gains over DES.
The key is a string of length anywhere between 64 and 448 bits, or
equivalently 8 and 56 bytes. The encryption and decryption functions operate
on 64-bit blocks, or 8 byte strings.
Send questions, comments, bugs my way:
Michael Gilfix <mgilfix@eecs.tufts.edu>
The module has been expanded to include CTR stream encryption/decryption
mode, built from the primitives of the original module. This change
did not alter any of the base Blowfish code from the original author.
The author of CTR changes is:
Ivan Voras <ivoras@gmail.com>
"""
import struct, types
__author__ = "Michael Gilfix <mgilfix@eecs.tufts.edu>"
class Blowfish:
"""Blowfish encryption Scheme
This class implements the encryption and decryption
functionality of the Blowfish cipher.
Public functions:
def __init__ (self, key)
Creates an instance of blowfish using 'key'
as the encryption key. Key is a string of
length ranging from 8 to 56 bytes (64 to 448
bits). Once the instance of the object is
created, the key is no longer necessary.
def encrypt (self, data):
Encrypt an 8 byte (64-bit) block of text
where 'data' is an 8 byte string. Returns an
8-byte encrypted string.
def decrypt (self, data):
Decrypt an 8 byte (64-bit) encrypted block
of text, where 'data' is the 8 byte encrypted
string. Returns an 8-byte string of plaintext.
def cipher (self, xl, xr, direction):
Encrypts a 64-bit block of data where xl is
the upper 32-bits and xr is the lower 32-bits.
'direction' is the direction to apply the
cipher, either ENCRYPT or DECRYPT constants.
returns a tuple of either encrypted or decrypted
data of the left half and right half of the
64-bit block.
def initCTR(self):
Initializes CTR engine for encryption or decryption.
def encryptCTR(self, data):
Encrypts an arbitrary string and returns the
encrypted string. The method can be called successively
for multiple string blocks.
def decryptCTR(self, data):
Decrypts a string encrypted with encryptCTR() and
returns the decrypted string.
Private members:
def __round_func (self, xl)
Performs an obscuring function on the 32-bit
block of data 'xl', which is the left half of
the 64-bit block of data. Returns the 32-bit
result as a long integer.
"""
# Cipher directions
ENCRYPT = 0
DECRYPT = 1
# For the __round_func
modulus = long (2) ** 32
def __init__ (self, key):
if not key or len (key) < 8 or len (key) > 56:
raise RuntimeError, "Attempted to initialize Blowfish cipher with key of invalid length: %s" % len (key)
self.p_boxes = [
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,
0x9216D5D9, 0x8979FB1B
]
self.s_boxes = [
[
0xD1310BA6, 0x98DFB5AC, 0x2FFD72DB, 0xD01ADFB7,
0xB8E1AFED, 0x6A267E96, 0xBA7C9045, 0xF12C7F99,
0x24A19947, 0xB3916CF7, 0x0801F2E2, 0x858EFC16,
0x636920D8, 0x71574E69, 0xA458FEA3, 0xF4933D7E,
0x0D95748F, 0x728EB658, 0x718BCD58, 0x82154AEE,
0x7B54A41D, 0xC25A59B5, 0x9C30D539, 0x2AF26013,
0xC5D1B023, 0x286085F0, 0xCA417918, 0xB8DB38EF,
0x8E79DCB0, 0x603A180E, 0x6C9E0E8B, 0xB01E8A3E,
0xD71577C1, 0xBD314B27, 0x78AF2FDA, 0x55605C60,
0xE65525F3, 0xAA55AB94, 0x57489862, 0x63E81440,
0x55CA396A, 0x2AAB10B6, 0xB4CC5C34, 0x1141E8CE,
0xA15486AF, 0x7C72E993, 0xB3EE1411, 0x636FBC2A,
0x2BA9C55D, 0x741831F6, 0xCE5C3E16, 0x9B87931E,
0xAFD6BA33, 0x6C24CF5C, 0x7A325381, 0x28958677,
0x3B8F4898, 0x6B4BB9AF, 0xC4BFE81B, 0x66282193,
0x61D809CC, 0xFB21A991, 0x487CAC60, 0x5DEC8032,
0xEF845D5D, 0xE98575B1, 0xDC262302, 0xEB651B88,
0x23893E81, 0xD396ACC5, 0x0F6D6FF3, 0x83F44239,
0x2E0B4482, 0xA4842004, 0x69C8F04A, 0x9E1F9B5E,
0x21C66842, 0xF6E96C9A, 0x670C9C61, 0xABD388F0,
0x6A51A0D2, 0xD8542F68, 0x960FA728, 0xAB5133A3,
0x6EEF0B6C, 0x137A3BE4, 0xBA3BF050, 0x7EFB2A98,
0xA1F1651D, 0x39AF0176, 0x66CA593E, 0x82430E88,
0x8CEE8619, 0x456F9FB4, 0x7D84A5C3, 0x3B8B5EBE,
0xE06F75D8, 0x85C12073, 0x401A449F, 0x56C16AA6,
0x4ED3AA62, 0x363F7706, 0x1BFEDF72, 0x429B023D,
0x37D0D724, 0xD00A1248, 0xDB0FEAD3, 0x49F1C09B,
0x075372C9, 0x80991B7B, 0x25D479D8, 0xF6E8DEF7,
0xE3FE501A, 0xB6794C3B, 0x976CE0BD, 0x04C006BA,
0xC1A94FB6, 0x409F60C4, 0x5E5C9EC2, 0x196A2463,
0x68FB6FAF, 0x3E6C53B5, 0x1339B2EB, 0x3B52EC6F,
0x6DFC511F, 0x9B30952C, 0xCC814544, 0xAF5EBD09,
0xBEE3D004, 0xDE334AFD, 0x660F2807, 0x192E4BB3,
0xC0CBA857, 0x45C8740F, 0xD20B5F39, 0xB9D3FBDB,
0x5579C0BD, 0x1A60320A, 0xD6A100C6, 0x402C7279,
0x679F25FE, 0xFB1FA3CC, 0x8EA5E9F8, 0xDB3222F8,
0x3C7516DF, 0xFD616B15, 0x2F501EC8, 0xAD0552AB,
0x323DB5FA, 0xFD238760, 0x53317B48, 0x3E00DF82,
0x9E5C57BB, 0xCA6F8CA0, 0x1A87562E, 0xDF1769DB,
0xD542A8F6, 0x287EFFC3, 0xAC6732C6, 0x8C4F5573,
0x695B27B0, 0xBBCA58C8, 0xE1FFA35D, 0xB8F011A0,
0x10FA3D98, 0xFD2183B8, 0x4AFCB56C, 0x2DD1D35B,
0x9A53E479, 0xB6F84565, 0xD28E49BC, 0x4BFB9790,
0xE1DDF2DA, 0xA4CB7E33, 0x62FB1341, 0xCEE4C6E8,
0xEF20CADA, 0x36774C01, 0xD07E9EFE, 0x2BF11FB4,
0x95DBDA4D, 0xAE909198, 0xEAAD8E71, 0x6B93D5A0,
0xD08ED1D0, 0xAFC725E0, 0x8E3C5B2F, 0x8E7594B7,
0x8FF6E2FB, 0xF2122B64, 0x8888B812, 0x900DF01C,
0x4FAD5EA0, 0x688FC31C, 0xD1CFF191, 0xB3A8C1AD,
0x2F2F2218, 0xBE0E1777, 0xEA752DFE, 0x8B021FA1,
0xE5A0CC0F, 0xB56F74E8, 0x18ACF3D6, 0xCE89E299,
0xB4A84FE0, 0xFD13E0B7, 0x7CC43B81, 0xD2ADA8D9,
0x165FA266, 0x80957705, 0x93CC7314, 0x211A1477,
0xE6AD2065, 0x77B5FA86, 0xC75442F5, 0xFB9D35CF,
0xEBCDAF0C, 0x7B3E89A0, 0xD6411BD3, 0xAE1E7E49,
0x00250E2D, 0x2071B35E, 0x226800BB, 0x57B8E0AF,
0x2464369B, 0xF009B91E, 0x5563911D, 0x59DFA6AA,
0x78C14389, 0xD95A537F, 0x207D5BA2, 0x02E5B9C5,
0x83260376, 0x6295CFA9, 0x11C81968, 0x4E734A41,
0xB3472DCA, 0x7B14A94A, 0x1B510052, 0x9A532915,
0xD60F573F, 0xBC9BC6E4, 0x2B60A476, 0x81E67400,
0x08BA6FB5, 0x571BE91F, 0xF296EC6B, 0x2A0DD915,
0xB6636521, 0xE7B9F9B6, 0xFF34052E, 0xC5855664,
0x53B02D5D, 0xA99F8FA1, 0x08BA4799, 0x6E85076A
],
[
0x4B7A70E9, 0xB5B32944, 0xDB75092E, 0xC4192623,
0xAD6EA6B0, 0x49A7DF7D, 0x9CEE60B8, 0x8FEDB266,
0xECAA8C71, 0x699A17FF, 0x5664526C, 0xC2B19EE1,
0x193602A5, 0x75094C29, 0xA0591340, 0xE4183A3E,
0x3F54989A, 0x5B429D65, 0x6B8FE4D6, 0x99F73FD6,
0xA1D29C07, 0xEFE830F5, 0x4D2D38E6, 0xF0255DC1,
0x4CDD2086, 0x8470EB26, 0x6382E9C6, 0x021ECC5E,
0x09686B3F, 0x3EBAEFC9, 0x3C971814, 0x6B6A70A1,
0x687F3584, 0x52A0E286, 0xB79C5305, 0xAA500737,
0x3E07841C, 0x7FDEAE5C, 0x8E7D44EC, 0x5716F2B8,
0xB03ADA37, 0xF0500C0D, 0xF01C1F04, 0x0200B3FF,
0xAE0CF51A, 0x3CB574B2, 0x25837A58, 0xDC0921BD,
0xD19113F9, 0x7CA92FF6, 0x94324773, 0x22F54701,
0x3AE5E581, 0x37C2DADC, 0xC8B57634, 0x9AF3DDA7,
0xA9446146, 0x0FD0030E, 0xECC8C73E, 0xA4751E41,
0xE238CD99, 0x3BEA0E2F, 0x3280BBA1, 0x183EB331,
0x4E548B38, 0x4F6DB908, 0x6F420D03, 0xF60A04BF,
0x2CB81290, 0x24977C79, 0x5679B072, 0xBCAF89AF,
0xDE9A771F, 0xD9930810, 0xB38BAE12, 0xDCCF3F2E,
0x5512721F, 0x2E6B7124, 0x501ADDE6, 0x9F84CD87,
0x7A584718, 0x7408DA17, 0xBC9F9ABC, 0xE94B7D8C,
0xEC7AEC3A, 0xDB851DFA, 0x63094366, 0xC464C3D2,
0xEF1C1847, 0x3215D908, 0xDD433B37, 0x24C2BA16,
0x12A14D43, 0x2A65C451, 0x50940002, 0x133AE4DD,
0x71DFF89E, 0x10314E55, 0x81AC77D6, 0x5F11199B,
0x043556F1, 0xD7A3C76B, 0x3C11183B, 0x5924A509,
0xF28FE6ED, 0x97F1FBFA, 0x9EBABF2C, 0x1E153C6E,
0x86E34570, 0xEAE96FB1, 0x860E5E0A, 0x5A3E2AB3,
0x771FE71C, 0x4E3D06FA, 0x2965DCB9, 0x99E71D0F,
0x803E89D6, 0x5266C825, 0x2E4CC978, 0x9C10B36A,
0xC6150EBA, 0x94E2EA78, 0xA5FC3C53, 0x1E0A2DF4,
0xF2F74EA7, 0x361D2B3D, 0x1939260F, 0x19C27960,
0x5223A708, 0xF71312B6, 0xEBADFE6E, 0xEAC31F66,
0xE3BC4595, 0xA67BC883, 0xB17F37D1, 0x018CFF28,
0xC332DDEF, 0xBE6C5AA5, 0x65582185, 0x68AB9802,
0xEECEA50F, 0xDB2F953B, 0x2AEF7DAD, 0x5B6E2F84,
0x1521B628, 0x29076170, 0xECDD4775, 0x619F1510,
0x13CCA830, 0xEB61BD96, 0x0334FE1E, 0xAA0363CF,
0xB5735C90, 0x4C70A239, 0xD59E9E0B, 0xCBAADE14,
0xEECC86BC, 0x60622CA7, 0x9CAB5CAB, 0xB2F3846E,
0x648B1EAF, 0x19BDF0CA, 0xA02369B9, 0x655ABB50,
0x40685A32, 0x3C2AB4B3, 0x319EE9D5, 0xC021B8F7,
0x9B540B19, 0x875FA099, 0x95F7997E, 0x623D7DA8,
0xF837889A, 0x97E32D77, 0x11ED935F, 0x16681281,
0x0E358829, 0xC7E61FD6, 0x96DEDFA1, 0x7858BA99,
0x57F584A5, 0x1B227263, 0x9B83C3FF, 0x1AC24696,
0xCDB30AEB, 0x532E3054, 0x8FD948E4, 0x6DBC3128,
0x58EBF2EF, 0x34C6FFEA, 0xFE28ED61, 0xEE7C3C73,
0x5D4A14D9, 0xE864B7E3, 0x42105D14, 0x203E13E0,
0x45EEE2B6, 0xA3AAABEA, 0xDB6C4F15, 0xFACB4FD0,
0xC742F442, 0xEF6ABBB5, 0x654F3B1D, 0x41CD2105,
0xD81E799E, 0x86854DC7, 0xE44B476A, 0x3D816250,
0xCF62A1F2, 0x5B8D2646, 0xFC8883A0, 0xC1C7B6A3,
0x7F1524C3, 0x69CB7492, 0x47848A0B, 0x5692B285,
0x095BBF00, 0xAD19489D, 0x1462B174, 0x23820E00,
0x58428D2A, 0x0C55F5EA, 0x1DADF43E, 0x233F7061,
0x3372F092, 0x8D937E41, 0xD65FECF1, 0x6C223BDB,
0x7CDE3759, 0xCBEE7460, 0x4085F2A7, 0xCE77326E,
0xA6078084, 0x19F8509E, 0xE8EFD855, 0x61D99735,
0xA969A7AA, 0xC50C06C2, 0x5A04ABFC, 0x800BCADC,
0x9E447A2E, 0xC3453484, 0xFDD56705, 0x0E1E9EC9,
0xDB73DBD3, 0x105588CD, 0x675FDA79, 0xE3674340,
0xC5C43465, 0x713E38D8, 0x3D28F89E, 0xF16DFF20,
0x153E21E7, 0x8FB03D4A, 0xE6E39F2B, 0xDB83ADF7
],
[
0xE93D5A68, 0x948140F7, 0xF64C261C, 0x94692934,
0x411520F7, 0x7602D4F7, 0xBCF46B2E, 0xD4A20068,
0xD4082471, 0x3320F46A, 0x43B7D4B7, 0x500061AF,
0x1E39F62E, 0x97244546, 0x14214F74, 0xBF8B8840,
0x4D95FC1D, 0x96B591AF, 0x70F4DDD3, 0x66A02F45,
0xBFBC09EC, 0x03BD9785, 0x7FAC6DD0, 0x31CB8504,
0x96EB27B3, 0x55FD3941, 0xDA2547E6, 0xABCA0A9A,
0x28507825, 0x530429F4, 0x0A2C86DA, 0xE9B66DFB,
0x68DC1462, 0xD7486900, 0x680EC0A4, 0x27A18DEE,
0x4F3FFEA2, 0xE887AD8C, 0xB58CE006, 0x7AF4D6B6,
0xAACE1E7C, 0xD3375FEC, 0xCE78A399, 0x406B2A42,
0x20FE9E35, 0xD9F385B9, 0xEE39D7AB, 0x3B124E8B,
0x1DC9FAF7, 0x4B6D1856, 0x26A36631, 0xEAE397B2,
0x3A6EFA74, 0xDD5B4332, 0x6841E7F7, 0xCA7820FB,
0xFB0AF54E, 0xD8FEB397, 0x454056AC, 0xBA489527,
0x55533A3A, 0x20838D87, 0xFE6BA9B7, 0xD096954B,
0x55A867BC, 0xA1159A58, 0xCCA92963, 0x99E1DB33,
0xA62A4A56, 0x3F3125F9, 0x5EF47E1C, 0x9029317C,
0xFDF8E802, 0x04272F70, 0x80BB155C, 0x05282CE3,
0x95C11548, 0xE4C66D22, 0x48C1133F, 0xC70F86DC,
0x07F9C9EE, 0x41041F0F, 0x404779A4, 0x5D886E17,
0x325F51EB, 0xD59BC0D1, 0xF2BCC18F, 0x41113564,
0x257B7834, 0x602A9C60, 0xDFF8E8A3, 0x1F636C1B,
0x0E12B4C2, 0x02E1329E, 0xAF664FD1, 0xCAD18115,
0x6B2395E0, 0x333E92E1, 0x3B240B62, 0xEEBEB922,
0x85B2A20E, 0xE6BA0D99, 0xDE720C8C, 0x2DA2F728,
0xD0127845, 0x95B794FD, 0x647D0862, 0xE7CCF5F0,
0x5449A36F, 0x877D48FA, 0xC39DFD27, 0xF33E8D1E,
0x0A476341, 0x992EFF74, 0x3A6F6EAB, 0xF4F8FD37,
0xA812DC60, 0xA1EBDDF8, 0x991BE14C, 0xDB6E6B0D,
0xC67B5510, 0x6D672C37, 0x2765D43B, 0xDCD0E804,
0xF1290DC7, 0xCC00FFA3, 0xB5390F92, 0x690FED0B,
0x667B9FFB, 0xCEDB7D9C, 0xA091CF0B, 0xD9155EA3,
0xBB132F88, 0x515BAD24, 0x7B9479BF, 0x763BD6EB,
0x37392EB3, 0xCC115979, 0x8026E297, 0xF42E312D,
0x6842ADA7, 0xC66A2B3B, 0x12754CCC, 0x782EF11C,
0x6A124237, 0xB79251E7, 0x06A1BBE6, 0x4BFB6350,
0x1A6B1018, 0x11CAEDFA, 0x3D25BDD8, 0xE2E1C3C9,
0x44421659, 0x0A121386, 0xD90CEC6E, 0xD5ABEA2A,
0x64AF674E, 0xDA86A85F, 0xBEBFE988, 0x64E4C3FE,
0x9DBC8057, 0xF0F7C086, 0x60787BF8, 0x6003604D,
0xD1FD8346, 0xF6381FB0, 0x7745AE04, 0xD736FCCC,
0x83426B33, 0xF01EAB71, 0xB0804187, 0x3C005E5F,
0x77A057BE, 0xBDE8AE24, 0x55464299, 0xBF582E61,
0x4E58F48F, 0xF2DDFDA2, 0xF474EF38, 0x8789BDC2,
0x5366F9C3, 0xC8B38E74, 0xB475F255, 0x46FCD9B9,
0x7AEB2661, 0x8B1DDF84, 0x846A0E79, 0x915F95E2,
0x466E598E, 0x20B45770, 0x8CD55591, 0xC902DE4C,
0xB90BACE1, 0xBB8205D0, 0x11A86248, 0x7574A99E,
0xB77F19B6, 0xE0A9DC09, 0x662D09A1, 0xC4324633,
0xE85A1F02, 0x09F0BE8C, 0x4A99A025, 0x1D6EFE10,
0x1AB93D1D, 0x0BA5A4DF, 0xA186F20F, 0x2868F169,
0xDCB7DA83, 0x573906FE, 0xA1E2CE9B, 0x4FCD7F52,
0x50115E01, 0xA70683FA, 0xA002B5C4, 0x0DE6D027,
0x9AF88C27, 0x773F8641, 0xC3604C06, 0x61A806B5,
0xF0177A28, 0xC0F586E0, 0x006058AA, 0x30DC7D62,
0x11E69ED7, 0x2338EA63, 0x53C2DD94, 0xC2C21634,
0xBBCBEE56, 0x90BCB6DE, 0xEBFC7DA1, 0xCE591D76,
0x6F05E409, 0x4B7C0188, 0x39720A3D, 0x7C927C24,
0x86E3725F, 0x724D9DB9, 0x1AC15BB4, 0xD39EB8FC,
0xED545578, 0x08FCA5B5, 0xD83D7CD3, 0x4DAD0FC4,
0x1E50EF5E, 0xB161E6F8, 0xA28514D9, 0x6C51133C,
0x6FD5C7E7, 0x56E14EC4, 0x362ABFCE, 0xDDC6C837,
0xD79A3234, 0x92638212, 0x670EFA8E, 0x406000E0
],
[
0x3A39CE37, 0xD3FAF5CF, 0xABC27737, 0x5AC52D1B,
0x5CB0679E, 0x4FA33742, 0xD3822740, 0x99BC9BBE,
0xD5118E9D, 0xBF0F7315, 0xD62D1C7E, 0xC700C47B,
0xB78C1B6B, 0x21A19045, 0xB26EB1BE, 0x6A366EB4,
0x5748AB2F, 0xBC946E79, 0xC6A376D2, 0x6549C2C8,
0x530FF8EE, 0x468DDE7D, 0xD5730A1D, 0x4CD04DC6,
0x2939BBDB, 0xA9BA4650, 0xAC9526E8, 0xBE5EE304,
0xA1FAD5F0, 0x6A2D519A, 0x63EF8CE2, 0x9A86EE22,
0xC089C2B8, 0x43242EF6, 0xA51E03AA, 0x9CF2D0A4,
0x83C061BA, 0x9BE96A4D, 0x8FE51550, 0xBA645BD6,
0x2826A2F9, 0xA73A3AE1, 0x4BA99586, 0xEF5562E9,
0xC72FEFD3, 0xF752F7DA, 0x3F046F69, 0x77FA0A59,
0x80E4A915, 0x87B08601, 0x9B09E6AD, 0x3B3EE593,
0xE990FD5A, 0x9E34D797, 0x2CF0B7D9, 0x022B8B51,
0x96D5AC3A, 0x017DA67D, 0xD1CF3ED6, 0x7C7D2D28,
0x1F9F25CF, 0xADF2B89B, 0x5AD6B472, 0x5A88F54C,
0xE029AC71, 0xE019A5E6, 0x47B0ACFD, 0xED93FA9B,
0xE8D3C48D, 0x283B57CC, 0xF8D56629, 0x79132E28,
0x785F0191, 0xED756055, 0xF7960E44, 0xE3D35E8C,
0x15056DD4, 0x88F46DBA, 0x03A16125, 0x0564F0BD,
0xC3EB9E15, 0x3C9057A2, 0x97271AEC, 0xA93A072A,
0x1B3F6D9B, 0x1E6321F5, 0xF59C66FB, 0x26DCF319,
0x7533D928, 0xB155FDF5, 0x03563482, 0x8ABA3CBB,
0x28517711, 0xC20AD9F8, 0xABCC5167, 0xCCAD925F,
0x4DE81751, 0x3830DC8E, 0x379D5862, 0x9320F991,
0xEA7A90C2, 0xFB3E7BCE, 0x5121CE64, 0x774FBE32,
0xA8B6E37E, 0xC3293D46, 0x48DE5369, 0x6413E680,
0xA2AE0810, 0xDD6DB224, 0x69852DFD, 0x09072166,
0xB39A460A, 0x6445C0DD, 0x586CDECF, 0x1C20C8AE,
0x5BBEF7DD, 0x1B588D40, 0xCCD2017F, 0x6BB4E3BB,
0xDDA26A7E, 0x3A59FF45, 0x3E350A44, 0xBCB4CDD5,
0x72EACEA8, 0xFA6484BB, 0x8D6612AE, 0xBF3C6F47,
0xD29BE463, 0x542F5D9E, 0xAEC2771B, 0xF64E6370,
0x740E0D8D, 0xE75B1357, 0xF8721671, 0xAF537D5D,
0x4040CB08, 0x4EB4E2CC, 0x34D2466A, 0x0115AF84,
0xE1B00428, 0x95983A1D, 0x06B89FB4, 0xCE6EA048,
0x6F3F3B82, 0x3520AB82, 0x011A1D4B, 0x277227F8,
0x611560B1, 0xE7933FDC, 0xBB3A792B, 0x344525BD,
0xA08839E1, 0x51CE794B, 0x2F32C9B7, 0xA01FBAC9,
0xE01CC87E, 0xBCC7D1F6, 0xCF0111C3, 0xA1E8AAC7,
0x1A908749, 0xD44FBD9A, 0xD0DADECB, 0xD50ADA38,
0x0339C32A, 0xC6913667, 0x8DF9317C, 0xE0B12B4F,
0xF79E59B7, 0x43F5BB3A, 0xF2D519FF, 0x27D9459C,
0xBF97222C, 0x15E6FC2A, 0x0F91FC71, 0x9B941525,
0xFAE59361, 0xCEB69CEB, 0xC2A86459, 0x12BAA8D1,
0xB6C1075E, 0xE3056A0C, 0x10D25065, 0xCB03A442,
0xE0EC6E0E, 0x1698DB3B, 0x4C98A0BE, 0x3278E964,
0x9F1F9532, 0xE0D392DF, 0xD3A0342B, 0x8971F21E,
0x1B0A7441, 0x4BA3348C, 0xC5BE7120, 0xC37632D8,
0xDF359F8D, 0x9B992F2E, 0xE60B6F47, 0x0FE3F11D,
0xE54CDA54, 0x1EDAD891, 0xCE6279CF, 0xCD3E7E6F,
0x1618B166, 0xFD2C1D05, 0x848FD2C5, 0xF6FB2299,
0xF523F357, 0xA6327623, 0x93A83531, 0x56CCCD02,
0xACF08162, 0x5A75EBB5, 0x6E163697, 0x88D273CC,
0xDE966292, 0x81B949D0, 0x4C50901B, 0x71C65614,
0xE6C6C7BD, 0x327A140A, 0x45E1D006, 0xC3F27B9A,
0xC9AA53FD, 0x62A80F00, 0xBB25BFE2, 0x35BDD2F6,
0x71126905, 0xB2040222, 0xB6CBCF7C, 0xCD769C2B,
0x53113EC0, 0x1640E3D3, 0x38ABBD60, 0x2547ADF0,
0xBA38209C, 0xF746CE76, 0x77AFA1C5, 0x20756060,
0x85CBFE4E, 0x8AE88DD8, 0x7AAAF9B0, 0x4CF9AA7E,
0x1948C25C, 0x02FB8A8C, 0x01C36AE4, 0xD6EBE1F9,
0x90D4F869, 0xA65CDEA0, 0x3F09252D, 0xC208E69F,
0xB74E6132, 0xCE77E25B, 0x578FDFE3, 0x3AC372E6
]
]
# Cycle through the p-boxes and round-robin XOR the
# key with the p-boxes
key_len = len (key)
index = 0
for i in range (len (self.p_boxes)):
val = (ord (key[index % key_len]) << 24) + \
(ord (key[(index + 1) % key_len]) << 16) + \
(ord (key[(index + 2) % key_len]) << 8) + \
ord (key[(index + 3) % key_len])
self.p_boxes[i] = self.p_boxes[i] ^ val
index = index + 4
# For the chaining process
l, r = 0, 0
# Begin chain replacing the p-boxes
for i in range (0, len (self.p_boxes), 2):
l, r = self.cipher (l, r, self.ENCRYPT)
self.p_boxes[i] = l
self.p_boxes[i + 1] = r
# Chain replace the s-boxes
for i in range (len (self.s_boxes)):
for j in range (0, len (self.s_boxes[i]), 2):
l, r = self.cipher (l, r, self.ENCRYPT)
self.s_boxes[i][j] = l
self.s_boxes[i][j + 1] = r
self.initCTR()
def cipher (self, xl, xr, direction):
"""Encryption primitive"""
if direction == self.ENCRYPT:
for i in range (16):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[16]
xl = xl ^ self.p_boxes[17]
else:
for i in range (17, 1, -1):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[1]
xl = xl ^ self.p_boxes[0]
return xl, xr
def __round_func (self, xl):
a = (xl & 0xFF000000) >> 24
b = (xl & 0x00FF0000) >> 16
c = (xl & 0x0000FF00) >> 8
d = xl & 0x000000FF
# Perform all ops as longs then and out the last 32-bits to
# obtain the integer
f = (long (self.s_boxes[0][a]) + long (self.s_boxes[1][b])) % self.modulus
f = f ^ long (self.s_boxes[2][c])
f = f + long (self.s_boxes[3][d])
f = (f % self.modulus) & 0xFFFFFFFF
return f
def encrypt (self, data):
if not len (data) == 8:
raise RuntimeError, "Attempted to encrypt data of invalid block length: %s" % len(data)
# Use big endianness since that's what everyone else uses
xl = ord (data[3]) | (ord (data[2]) << 8) | (ord (data[1]) << 16) | (ord (data[0]) << 24)
xr = ord (data[7]) | (ord (data[6]) << 8) | (ord (data[5]) << 16) | (ord (data[4]) << 24)
cl, cr = self.cipher (xl, xr, self.ENCRYPT)
chars = ''.join ([
chr ((cl >> 24) & 0xFF), chr ((cl >> 16) & 0xFF), chr ((cl >> 8) & 0xFF), chr (cl & 0xFF),
chr ((cr >> 24) & 0xFF), chr ((cr >> 16) & 0xFF), chr ((cr >> 8) & 0xFF), chr (cr & 0xFF)
])
return chars
def decrypt (self, data):
if not len (data) == 8:
raise RuntimeError, "Attempted to encrypt data of invalid block length: %s" % len(data)
# Use big endianness since that's what everyone else uses
cl = ord (data[3]) | (ord (data[2]) << 8) | (ord (data[1]) << 16) | (ord (data[0]) << 24)
cr = ord (data[7]) | (ord (data[6]) << 8) | (ord (data[5]) << 16) | (ord (data[4]) << 24)
xl, xr = self.cipher (cl, cr, self.DECRYPT)
chars = ''.join ([
chr ((xl >> 24) & 0xFF), chr ((xl >> 16) & 0xFF), chr ((xl >> 8) & 0xFF), chr (xl & 0xFF),
chr ((xr >> 24) & 0xFF), chr ((xr >> 16) & 0xFF), chr ((xr >> 8) & 0xFF), chr (xr & 0xFF)
])
return chars
# ==== CBC Mode ====
def initCBC(self, iv=0):
"""Initializes CBC mode of the cypher"""
assert struct.calcsize("Q") == self.block_size()
self.cbc_iv = struct.pack("Q", iv)
def encryptCBC(self, data):
"""
Encrypts a buffer of data using CBC mode. Multiple successive buffers
(belonging to the same logical stream of buffers) can be encrypted
with this method one after the other without any intermediate work.
Each buffer must be a multiple of 8-octets (64-bits) in length.
"""
if type(data) != types.StringType:
raise RuntimeError, "Can only work on 8-bit strings"
if (len(data) % 8) != 0:
raise RuntimeError, "Can only work with data in 64-bit multiples in CBC mode"
xor = lambda t: ord(t[0]) ^ ord(t[1])
result = ''
block_size = self.block_size()
for i in range(0, len(data), block_size):
p_block = data[i:i+block_size]
pair = zip(p_block, self.cbc_iv)
j_block = ''.join(map(chr, map(xor, pair)))
c_block = self.encrypt(j_block)
result += c_block
self.cbc_iv = c_block
return result
def decryptCBC(self, data):
if type(data) != types.StringType:
raise RuntimeError, "Can only work on 8-bit strings"
if (len(data) % 8) != 0:
raise RuntimeError, "Can only work with data in 64-bit multiples in CBC mode"
xor = lambda t: ord(t[0]) ^ ord(t[1])
result = ''
block_size = self.block_size()
for i in range(0, len(data), block_size):
c_block = data[i:i+block_size]
j_block = self.decrypt(c_block)
pair = zip(j_block, self.cbc_iv)
p_block = ''.join(map(chr, map(xor, pair)))
result += p_block
self.cbc_iv = c_block
return result
# ==== CTR Mode ====
def initCTR(self, iv=0):
"""Initializes CTR mode of the cypher"""
assert struct.calcsize("Q") == self.block_size()
self.ctr_iv = iv
self._calcCTRBUF()
def _calcCTRBUF(self):
"""Calculates one block of CTR keystream"""
self.ctr_cks = self.encrypt(struct.pack("Q", self.ctr_iv)) # keystream block
self.ctr_iv += 1
self.ctr_pos = 0
def _nextCTRByte(self):
"""Returns one byte of CTR keystream"""
b = ord(self.ctr_cks[self.ctr_pos])
self.ctr_pos += 1
if self.ctr_pos >= len(self.ctr_cks):
self._calcCTRBUF()
return b
def encryptCTR(self, data):
"""
Encrypts a buffer of data with CTR mode. Multiple successive buffers
(belonging to the same logical stream of buffers) can be encrypted
with this method one after the other without any intermediate work.
"""
if type(data) != types.StringType:
raise RuntimeError, "Can only work on 8-bit strings"
result = []
for ch in data:
result.append(chr(ord(ch) ^ self._nextCTRByte()))
return "".join(result)
def decryptCTR(self, data):
return self.encryptCTR(data)
def block_size(self):
return 8
def key_length(self):
return 56
def key_bits(self):
return 56 * 8
@staticmethod
def testVectors():
import binascii
# for more vectors see http://www.schneier.com/code/vectors.txt
vectors = (
('0000000000000000', '0000000000000000', '4EF997456198DD78'),
('FFFFFFFFFFFFFFFF', 'FFFFFFFFFFFFFFFF', '51866FD5B85ECB8A'),
('3000000000000000', '1000000000000001', '7D856F9A613063F2'),
('1111111111111111', '1111111111111111', '2466DD878B963C9D'),
('49E95D6D4CA229BF', '02FE55778117F12A', 'CF9C5D7A4986ADB5'),
('E0FEE0FEF1FEF1FE', '0123456789ABCDEF', 'C39E072D9FAC631D'),
('07A7137045DA2A16', '3BDD119049372802', '2EEDDA93FFD39C79'),
)
ok = True
for v in vectors:
c = Blowfish(binascii.a2b_hex(v[0]))
e = binascii.b2a_hex(c.encrypt(binascii.a2b_hex(v[1]))).upper()
if e != v[2]:
print "VECTOR TEST FAIL: expecting %s, got %s" % (repr(v), e)
ok = False
return ok
##############################################################
# Module testing
if __name__ == '__main__':
if not Blowfish.testVectors():
print "WARNING: The implementation doesn't pass algorithm test vectors!"
else:
print "The implementation passes algorithm test vectors (ECB)."
key = 'This is a test key'
cipher = Blowfish (key)
print "Testing encryption:"
xl = 123456
xr = 654321
print "\tPlain text: (%s, %s)" %(xl, xr)
cl, cr = cipher.cipher (xl, xr, cipher.ENCRYPT)
print "\tCrypted is: (%s, %s)" %(cl, cr)
dl, dr = cipher.cipher (cl, cr, cipher.DECRYPT)
print "\tUnencrypted is: (%s, %s)" %(dl, dr)
print "Testing block encrypt:"
text = 'testtest'
print "\tText:\t\t%s" %text
crypted = cipher.encrypt(text)
print "\tEncrypted:\t%s" % repr(crypted)
decrypted = cipher.decrypt(crypted)
print "\tDecrypted:\t%s" %decrypted
print "Testing CTR encrypt:"
cipher.initCTR()
text = "The quick brown fox jumps over the lazy dog"
print "\tText:\t\t", text
crypted = cipher.encryptCTR(text)
print "\tEncrypted:\t", repr(crypted)
cipher.initCTR()
decrypted = cipher.decryptCTR(crypted)
print "\tDecrypted:\t", decrypted
print "Testing CBC encrypt:"
cipher.initCBC()
text = "Owen's Ornery Old Oryx Obstructed Olga's Optics."
print "\tText:\t\t", text
crypted = cipher.encryptCBC(text)
print "\tEncrypted:\t", repr(crypted)
cipher.initCBC()
decrypted = cipher.decryptCBC(crypted)
print "\tDecrypted:\t", decrypted
print "Testing speed"
from time import time
t1 = time()
n = 0
tlen = 0
while True:
for i in xrange(1000):
tstr = "The quick brown fox jumps over the lazy dog %d" % i
enc = cipher.encryptCTR(tstr)
tlen += len(tstr)
n += 1000
t2 = time()
if t2 - t1 > 5:
break
t = t2 - t1
print "%d encryptions in %0.1f seconds: %0.1f enc/s, %0.1f bytes/s" % (n, t, n / t, tlen / t)
|
|
'''
Backend script for the "E-Mail defer"-Zimlet available
at https://github.com/dploeger/zimbra-zimlet-emaildefer
@author: Dennis Ploeger <develop@dieploegers.de>
'''
# Imports
from com.zimbra.client import ZMailbox
from com.zimbra.common.account.Key import AccountBy
from com.zimbra.client import ZSearchParams
from com.zimbra.cs.account.soap import SoapProvisioning
from com.zimbra.common.util import ZimbraLog
from java.util import TimeZone
import logging
from optparse import OptionParser
if __name__ == "__main__":
# Interpret arguments
parser = OptionParser(
usage="Usage: %prog [options] SERVER USERNAME PASSWORD",
description="SERVER: Name/IP of Zimbra-Server, "
+ "USERNAME: Administrative account username, "
+ "PASSWORD: Password of administrative account"
)
parser.add_option(
"-q",
"--quiet",
action="store_true",
dest="quiet",
help="Be quiet doing things.",
)
parser.add_option(
"-d",
"--debug",
action="store_true",
dest="debug",
help="Enable debug logging"
)
(options, args) = parser.parse_args()
if (len(args) < 3):
parser.error("Invalid number of arguments")
(server_name, admin_account, admin_password) = args
if options.quiet and options.debug:
parser.error("Cannot specify debug and quiet at the same time.")
if options.quiet:
logging.basicConfig(level=logging.FATAL)
elif options.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.debug("Starting deferAgent")
# Configure log4j (mainly to avoid Warnings)
ZimbraLog.toolSetupLog4jConsole("INFO", True, False)
# Connect to admin service
logging.debug(
"Authenticating against Zimbra-Server %s with user %s" % (
server_name,
admin_account
)
)
sp = SoapProvisioning()
sp.soapSetURI("https://%s:7071/service/admin/soap" % (server_name))
sp.soapAdminAuthenticate(admin_account, admin_password)
logging.debug("Fetching all domains")
domains = sp.getAllDomains()
for current_domain in domains:
logging.debug("Getting all accounts for domain %s" % (current_domain))
domainusers = sp.getAllAccounts(current_domain)
for current_user in domainusers:
logging.debug(
"Checking, if user %s is active." % (current_user)
)
logging.debug(
"Checking, if user %s has the zimlet configured"\
% (current_user)
)
# Connect to mailbox service using Administrator accounts
# Get Mailbox Options from Provisioning
sai = sp.getAccountInfo(
AccountBy.name,
current_user.getMail()
)
sa = sp.getAccount(current_user.getMail())
# Check whether the account is active
if not sa.isAccountStatusActive():
logging.info("Account %s is inactive" % (current_user))
continue
dar = sp.delegateAuth(
AccountBy.name,
current_user.getMail(),
60 * 60 * 24
)
opt = ZMailbox.Options(
dar.getAuthToken(),
sai.getAdminSoapURL()
)
mailbox = ZMailbox.getMailbox(opt)
accountinfo = mailbox.getAccountInfo(True)
defer_folder_id = None
defer_tag_id = None
for key in accountinfo.getZimletProps()[
"zimbraZimletUserProperties"
]:
if ("de_dieploegers_emaildefer" in key) and\
("deferFolderId" in key):
defer_folder_id = key.split(":")[2]
elif ("de_dieploegers_emaildefer" in key) and\
("deferTagId" in key):
defer_tag_id = key.split(":")[2]
if defer_folder_id != None and defer_tag_id != None:
logging.info(
"Checking for deferred mails of user %s" % (current_user)
)
# Check that the folder and tag exist
if mailbox.getTagById(defer_tag_id) == None:
logging.warn(
"Tag with ID %s doesn't exist for user %s" %
(
defer_tag_id,
current_user
)
)
continue
if mailbox.getFolderById(defer_folder_id) == None:
logging.warn(
"Folder with ID %s doesn't exist for user %s" %
(
defer_folder_id,
current_user
)
)
continue
# This user is using the defer-zimlet
searchparams = ZSearchParams(
"inid: %s and date:<=+0minute" % (defer_folder_id)
)
searchparams.setTypes(ZSearchParams.TYPE_MESSAGE)
searchparams.setTimeZone(
TimeZone.getTimeZone(
sa.getPrefTimeZoneId()[0]
)
)
searchparams.setLimit(9999)
# Get E-Mails in the defer folder aged today and older
results = mailbox.search(searchparams)
if results.getHits().size() > 0:
logging.info(
"Found %d deferred mails" % (
results.getHits().size()
)
)
else:
logging.info("No mails found")
for i in range(results.getHits().size()):
current_message = results.getHits().get(i).getId()
logging.info("Moving message %d" % (i + 1))
logging.debug(
"Message: %s" % (
results.getHits().get(i).dump()
)
)
result = mailbox.moveMessage(
current_message,
mailbox.getInbox().getId()
)
logging.info("Marking message as read")
result = mailbox.markItemRead(
current_message,
False,
None
)
logging.info("Tagging message")
result = mailbox.tagMessage(
current_message,
defer_tag_id,
True
)
|
|
from functools import wraps
import json
import sys
import traceback
from copy import deepcopy
from flask.ext import restful
from flask import make_response, request, Markup, g, current_app
from werkzeug.exceptions import HTTPException
from hoops.status import library as status_library
from hoops.response import APIResponse
from hoops.exc import APIException, APIValidationException
from hoops.status import APIStatus
from hoops.utils import Struct
import logging
error_map = {
200: status_library.API_OK,
403: status_library.API_FORBIDDEN,
404: status_library.API_RESOURCE_NOT_FOUND,
405: status_library.API_INVALID_REQUEST_METHOD,
500: status_library.API_UNHANDLED_EXCEPTION,
501: status_library.API_CODE_NOT_IMPLEMENTED,
}
api_logger = logging.getLogger('api.info')
request_logger = logging.getLogger('api.request')
api_error_logger = logging.getLogger('api.error')
error_logger = logging.getLogger('error')
class Resource(restful.Resource):
# applies to all inherited resources; OAuthAPI will append 'require_oauth' on init
method_decorators = []
class API(restful.Api):
def __init__(self, *args, **kwargs):
super(API, self).__init__(*args, **kwargs)
self.representations = {
#'application/xml': output_xml,
#'text/xml': output_xml,
'application/json': output_json,
}
def make_response(self, *args, **kwargs):
response = restful.Api.make_response(self, *args, **kwargs)
try:
message = getattr(args[0], 'response', None).get('status_message', None)
except:
message = args[0]
request_logger.info('%s: %s', response.data, message)
if response.status_code >= 500:
error_logger.exception('%s: %s', response.data, message)
return response
def handle_error(self, e):
if isinstance(e, HTTPException):
return self.make_response(
APIResponse(None, status=error_map.get(e.code, APIStatus(http_status=e.code, status_code=e.code * 10, message=e.description))),
e.code)
elif isinstance(e, APIValidationException):
return self.make_response(
APIResponse(None, status=e.status, extra=e.extra),
e.status.http_status)
elif isinstance(e, APIException):
return self.make_response(
APIResponse(None, status=e.status),
e.status.http_status)
status = status_library.API_UNHANDLED_EXCEPTION
if current_app.config.get('DEBUG'):
tb_info = sys.exc_info()
return self.make_response(
APIResponse(None, status=status, extra={
'exception': traceback.format_exception_only(tb_info[0], tb_info[1])[0],
'traceback': traceback.extract_tb(tb_info[2])
}), status.http_status)
# We don't use the default error handler
# e.g.: return super(API, self).handle_error(e)
return self.make_response(
APIResponse(None, status=status), status.http_status)
def _should_use_fr_error_handler(self):
""" Determine if error should be handled with FR or default Flask
Return True since we need all errors handled in above handler.
"""
return True
def mediatypes(self):
"""Replaces the acceptable media types with application/json if the request came from a browser.
Also looks for the output_format parameter.
"""
if request.args.get('output_format', '') == 'xml' or request.form.get('output_format', '') == 'xml':
return ['application/xml']
elif request.args.get('output_format', '') == 'json' or request.form.get('output_format', '') == 'json':
return ['application/json']
if (('text/html' in request.accept_mimetypes or
'application/xhtml+xml' in request.accept_mimetypes)
and 'Mozilla' in request.user_agent.string):
return ['application/json']
return super(API, self).mediatypes()
def register(self, cls):
routes = [cls.route] if cls.route else []
object_route = getattr(cls, 'object_route', None)
if object_route:
routes.append(object_route)
if routes:
[api_logger.debug('Adding route %s' % route) for route in routes]
self.add_resource(cls, *routes, endpoint=cls.route)
class OAuthAPI(API):
'''Only a single API at a time can be supported. Using OAuthAPI causes all resources to require OAuth.'''
def __init__(self, *args, **kwargs):
# TODO:
# - make oauth app specific (e.g. extra params, diff Resource for Oauth inheritance, etc?)
# - allow ad-hoc usage of oauth on some Resource objects
# - define alternate oauth arg handling methods besides static creds
oauth_args = kwargs.pop('oauth_args')
super(OAuthAPI, self).__init__(*args, **kwargs)
Resource.method_decorators = [require_oauth]
Resource.oauth_args = Struct(**oauth_args)
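# A minimal wiring sketch (illustrative only; the Flask app and WidgetResource
# below are hypothetical, not part of hoops):
#
#   app = Flask(__name__)
#   api = API(app)
#
#   class WidgetResource(Resource):
#       route = '/widgets'
#       object_route = '/widgets/<int:widget_id>'
#
#       def get(self, widget_id=None):
#           return APIResponse({'widget_id': widget_id})
#
#   api.register(WidgetResource)
#
# OAuthAPI is wired the same way but takes an extra oauth_args dict, and every
# registered Resource then passes through require_oauth (defined below).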
def require_oauth(func):
'''Auth wrapper from http://flask-restful.readthedocs.org/en/latest/extending.html?highlight=authentication'''
@wraps(func)
def wrapper(*args, **kwargs):
from hoops.oauth_provider import oauth_authentication
# TODO: read server_oauth_creds from args/func
server_oauth_creds = {}
oauth_creds = oauth_authentication(server_oauth_creds)
if not oauth_creds:
# This is highly unlikely to occur, as oauth raises exceptions on problems
restful.abort(401) # pragma: no cover
return func(*args, **kwargs)
return wrapper
def prepare_output(data, code, headers=None):
if not isinstance(data, APIResponse):
data = APIResponse(data, status=error_map.get(code, APIStatus(
http_status=code, status_code=code * 10, message=data
)))
out = data.to_json()
code = data.status.http_status
return_string = unicode(data.response)
response_data = unicode(data.response.get('response_data')) if data.response.get('response_data') else return_string
request_logger.info('Response %d chars: %s...', len(return_string), unicode(response_data[:50]))
request_logger.debug('Response body: %s', return_string)
return out, code
def output_json(data, code, headers=None):
"""Makes a Flask response with a JSON encoded body"""
out, code = prepare_output(data, code, headers)
resp = make_response(json.dumps(out,
sort_keys=True,
indent=4,
separators=(',', ': ')), code)
resp.headers.extend(headers or {})
return resp
#def output_xml(data, code, headers=None):
# """Makes a Flask response with a XML encoded body"""
# out, code = prepare_output(data, code, headers)
# resp = xmlify(out)
# resp.code = code
# resp.headers.extend(headers or {})
#
# return resp
#
#
#def xmlify(output):
# """
# xmlfy takes a dictionary and converts it to xml.
# """
# XML_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'
# nodes = serialize_xml({'jetlaunch': output})
#
# r = make_response(Markup(XML_DECLARATION + ''.join(etree.tostring(node) for node in nodes)))
# r.mimetype = 'text/xml'
#
# return r
#
#
#def serialize_xml(root):
# node = None
# node_stack = []
# for key in root.keys():
# node = etree.Element(key)
# if isinstance(root[key], dict):
# inner_node_stack = serialize(root[key])
# for inner_node in inner_node_stack:
# node.append(inner_node)
# elif isinstance(root[key], list):
# for item in root[key]:
# itemnode = etree.Element('item') # magic string
# inner_node_stack = serialize(item)
# for inner_node in inner_node_stack:
# itemnode.append(inner_node)
# node.append(itemnode)
# else:
# if root[key] is not None:
# node.text = unicode(root[key])
# node_stack.append(node)
#
# return node_stack
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .turner import TurnerBaseIE
from ..utils import (
ExtractorError,
int_or_none,
)
class AdultSwimIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
_TESTS = [{
'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
'playlist': [
{
'md5': '247572debc75c7652f253c8daa51a14d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 1',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
{
'md5': '77b0e037a4b20ec6b98671c4c379f48d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 4',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
],
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'title': 'Rick and Morty - Pilot',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
'skip': 'This video is only available for registered users',
}, {
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
'playlist': [
{
'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
'info_dict': {
'id': '-t8CamQlQ2aYZ49ItZCFog-0',
'ext': 'flv',
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}
],
'info_dict': {
'id': '-t8CamQlQ2aYZ49ItZCFog',
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}, {
'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
'playlist': [
{
'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
'ext': 'mp4',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
}
],
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# heroMetadata.trailer
'url': 'http://www.adultswim.com/videos/decker/inside-decker-a-new-hero/',
'info_dict': {
'id': 'I0LQFQkaSUaFp8PnAWHhoQ',
'ext': 'mp4',
'title': 'Decker - Inside Decker: A New Hero',
'description': 'md5:c916df071d425d62d70c86d4399d3ee0',
'duration': 249.008,
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.adultswim.com/videos/toonami/friday-october-14th-2016/',
'info_dict': {
'id': 'eYiLsKVgQ6qTC6agD67Sig',
'title': 'Toonami - Friday, October 14th, 2016',
'description': 'md5:99892c96ffc85e159a428de85c30acde',
},
'playlist': [{
'md5': '',
'info_dict': {
'id': 'eYiLsKVgQ6qTC6agD67Sig',
'ext': 'mp4',
'title': 'Toonami - Friday, October 14th, 2016',
'description': 'md5:99892c96ffc85e159a428de85c30acde',
},
}],
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}]
@staticmethod
def find_video_info(collection, slug):
for video in collection.get('videos'):
if video.get('slug') == slug:
return video
@staticmethod
def find_collection_by_linkURL(collections, linkURL):
for collection in collections:
if collection.get('linkURL') == linkURL:
return collection
@staticmethod
def find_collection_containing_video(collections, slug):
for collection in collections:
for video in collection.get('videos'):
if video.get('slug') == slug:
return collection, video
return None, None
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
show_path = mobj.group('show_path')
episode_path = mobj.group('episode_path')
is_playlist = bool(mobj.group('is_playlist'))
webpage = self._download_webpage(url, episode_path)
# Extract the value of `bootstrappedData` from the Javascript in the page.
bootstrapped_data = self._parse_json(self._search_regex(
r'var bootstrappedData = ({.*});', webpage, 'bootstrapped data'), episode_path)
# Downloading videos from a /videos/playlist/ URL needs to be handled differently.
# NOTE: We are only downloading one video (the current one) not the playlist
if is_playlist:
collections = bootstrapped_data['playlists']['collections']
collection = self.find_collection_by_linkURL(collections, show_path)
video_info = self.find_video_info(collection, episode_path)
show_title = video_info['showTitle']
segment_ids = [video_info['videoPlaybackID']]
else:
collections = bootstrapped_data['show']['collections']
collection, video_info = self.find_collection_containing_video(collections, episode_path)
# Video wasn't found in the collections, let's try `slugged_video`.
if video_info is None:
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
video_info = bootstrapped_data['slugged_video']
if not video_info:
video_info = bootstrapped_data.get(
'heroMetadata', {}).get('trailer', {}).get('video')
if not video_info:
video_info = bootstrapped_data.get('onlineOriginals', [None])[0]
if not video_info:
raise ExtractorError('Unable to find video info')
show = bootstrapped_data['show']
show_title = show['title']
stream = video_info.get('stream')
if stream and stream.get('videoPlaybackID'):
segment_ids = [stream['videoPlaybackID']]
elif video_info.get('clips'):
segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
elif video_info.get('videoPlaybackID'):
segment_ids = [video_info['videoPlaybackID']]
elif video_info.get('id'):
segment_ids = [video_info['id']]
else:
if video_info.get('auth') is True:
raise ExtractorError(
'This video is only available via cable service provider subscription that'
' is not currently supported. You may want to use --cookies.', expected=True)
else:
raise ExtractorError('Unable to find stream or clips')
episode_id = video_info['id']
episode_title = video_info['title']
episode_description = video_info.get('description')
episode_duration = int_or_none(video_info.get('duration'))
view_count = int_or_none(video_info.get('views'))
entries = []
for part_num, segment_id in enumerate(segment_ids):
segment_info = self._extract_cvp_info(
'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id,
segment_id, {
'secure': {
'media_src': 'http://androidhls-secure.cdn.turner.com/adultswim/big',
'tokenizer_src': 'http://www.adultswim.com/astv/mvpd/processors/services/token_ipadAdobe.do',
},
})
segment_title = '%s - %s' % (show_title, episode_title)
if len(segment_ids) > 1:
segment_title += ' Part %d' % (part_num + 1)
segment_info.update({
'id': segment_id,
'title': segment_title,
'description': episode_description,
})
entries.append(segment_info)
return {
'_type': 'playlist',
'id': episode_id,
'display_id': episode_path,
'entries': entries,
'title': '%s - %s' % (show_title, episode_title),
'description': episode_description,
'duration': episode_duration,
'view_count': view_count,
}
|
|
# encoding: utf8
from __future__ import unicode_literals
from optparse import make_option
from collections import OrderedDict
from importlib import import_module
import itertools
import traceback
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.state import ProjectState
from django.db.migrations.autodetector import MigrationAutodetector
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
make_option('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them'),
make_option('--list', '-l', action='store_true', dest='list', default=False,
help='Show a list of all known migrations and which are applied'),
)
help = "Updates database schema. Manages both apps with migrations and those without."
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
self.show_traceback = options.get('traceback')
self.load_initial_data = options.get('load_initial_data')
self.test_database = options.get('test_database', False)
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# If they asked for a migration listing, quit main execution flow and show it
if options.get("list", False):
return self.show_migration_list(connection, args)
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError("Conflicting migrations detected (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % name_str)
# If they supplied command line arguments, work out what they mean.
run_syncdb = False
target_app_labels_only = True
if len(args) > 2:
raise CommandError("Too many command-line arguments (expecting 'app_label' or 'app_label migrationname')")
elif len(args) == 2:
app_label, migration_name = args
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (app_label, migration_name))
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (app_label, migration_name))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif len(args) == 1:
app_label = args[0]
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
run_syncdb = True
plan = executor.migration_plan(targets)
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + (", ".join(executor.loader.unmigrated_apps) or "(none)"))
if target_app_labels_only:
self.stdout.write(self.style.MIGRATE_LABEL(" Apply all migrations: ") + (", ".join(set(a for a, n in targets)) or "(none)"))
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(" Unapply all migrations: ") + "%s" % (targets[0][0], ))
else:
self.stdout.write(self.style.MIGRATE_LABEL(" Target specific migration: ") + "%s, from %s" % (targets[0][1], targets[0][0]))
# Run the syncdb phase.
# If you ever manage to get rid of this, I owe you many, many drinks.
# Note that pre_migrate is called from inside here, as it needs
# the list of models about to be installed.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
else:
created_models = []
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations needed.")
# If there's changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.graph.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(" Your models have changes that are not yet reflected in a migration, and so won't be applied."))
self.stdout.write(self.style.NOTICE(" Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them."))
else:
executor.migrate(targets, plan, fake=options.get("fake", False))
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration, fake=False):
if self.verbosity >= 1:
if action == "apply_start":
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
elif action == "unapply_start":
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED"))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
def sync_apps(self, connection, app_labels):
"Runs the old syncdb-style operation on a list of app_labels."
cursor = connection.cursor()
try:
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names(cursor)
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=True))
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
# Note that if a model is unmanaged we short-circuit and never try to install it
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
create_models = set(itertools.chain(*manifest.values()))
emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with transaction.atomic(using=connection.alias, savepoint=False):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if self.verbosity >= 3:
self.stdout.write(" Processing %s.%s model\n" % (app_name, model._meta.object_name))
sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, no_style(), pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, no_style(), pending_references))
if self.verbosity >= 1 and sql:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# We force a commit here, as that was the previous behaviour.
# If you can prove we don't need this, remove it.
transaction.set_dirty(using=connection.alias)
finally:
cursor.close()
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
try:
# Install custom SQL for the app (but only if this
# is a model we've just created)
if self.verbosity >= 1:
self.stdout.write(" Installing custom SQL...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, no_style(), connection)
if custom_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
if self.show_traceback:
traceback.print_exc()
else:
if self.verbosity >= 3:
self.stdout.write(" No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
if self.verbosity >= 1:
self.stdout.write(" Installing indexes...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, no_style())
if index_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
finally:
cursor.close()
# Load initial_data fixtures (unless that has been disabled)
if self.load_initial_data:
for app_label in app_labels:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=connection.alias, skip_validation=True, app_label=app_label, hide_empty=True)
return created_models
def show_migration_list(self, connection, app_names=None):
"""
Shows a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .... import io
from .... import compiler
from ....framework import Program
from ....framework import program_guard
from ....framework import Parameter
from ....framework import Variable
from ....executor import Executor
import copy
from collections import Iterable
from ....io import save_inference_model, load_inference_model, save_persistables
import numpy as np
import pickle
import os
__all__ = ['GraphWrapper', 'VarWrapper', 'OpWrapper']
OPTIMIZER_OPS = [
'momentum',
'lars_momentum',
'adagrad',
'adam',
'adamax',
'dpsgd',
'decayed_adagrad',
'adadelta',
'rmsprop',
]
class VarWrapper(object):
def __init__(self, var, graph):
assert isinstance(var, Variable)
assert isinstance(graph, GraphWrapper)
self._var = var
self._graph = graph
def __eq__(self, v):
"""
Override this function to support the "in" (membership) syntax in Python.
"""
return self._var.name == v._var.name
def name(self):
"""
Get the name of the variable.
"""
return self._var.name
def shape(self):
"""
Get the shape of the variable.
"""
return self._var.shape
def set_shape(self, shape):
"""
Set the shape of the variable.
"""
self._var.desc.set_shape(shape)
def inputs(self):
"""
Get all the operators that use this variable as output.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for op in self._graph.ops():
if self in op.all_inputs():
ops.append(op)
return ops
def outputs(self):
"""
Get all the operators that use this variable as input.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for op in self._graph.ops():
if self in op.all_outputs():
ops.append(op)
return ops
class OpWrapper(object):
def __init__(self, op, graph):
assert isinstance(graph, GraphWrapper)
self._op = op
self._graph = graph
def __eq__(self, op):
"""
Override this function to support the "in" (membership) syntax in Python.
"""
return self.idx() == op.idx()
def all_inputs(self):
"""
Get all the input variables of this operator.
"""
return [
self._graph.var(var_name) for var_name in self._op.input_arg_names
]
def all_outputs(self):
"""
Get all the output variables of this operator.
"""
return [
self._graph.var(var_name) for var_name in self._op.output_arg_names
]
def idx(self):
"""
Get the id of this operator.
"""
return self._op.idx
def type(self):
"""
Get the type of this operator.
"""
return self._op.type
def is_bwd_op(self):
"""
Whether this operator is backward op.
"""
return self.type().endswith('_grad')
def is_opt_op(self):
"""
Whether this operator is optimizer op.
"""
return self.type() in OPTIMIZER_OPS
def inputs(self, name):
"""
Get all the variables by the input name.
"""
return [self._graph.var(var_name) for var_name in self._op.input(name)]
def outputs(self, name):
"""
Get all the variables by the output name.
"""
return [self._graph.var(var_name) for var_name in self._op.output(name)]
def set_attr(self, key, value):
"""
Set the value of attribute by attribute's name.
Args:
key(str): the attribute name.
value(bool|int|str|float|list): the value of the attribute.
"""
self._op._set_attr(key, value)
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
return self._op.attr(name)
class GraphWrapper(object):
"""
It is a wrapper of paddle.fluid.framework.Program with some special functions
for paddle slim framework.
"""
def __init__(self, program=None, in_nodes=[], out_nodes=[]):
"""
Args:
program(framework.Program): The program to be wrapped. If None, an empty Program is created.
in_nodes(dict): A dict to indicate the input nodes of the graph.
The key is user-defined and human-readable name.
The value is the name of Variable.
out_nodes(dict): A dict to indicate the output nodes of the graph.
The key is user-defined and human-readable name.
The value is the name of Variable.
"""
super(GraphWrapper, self).__init__()
self.program = Program() if program is None else program
self.persistables = {}
self.teacher_persistables = {}
for var in self.program.list_vars():
if var.persistable:
self.persistables[var.name] = var
self.compiled_graph = None
in_nodes = [] if in_nodes is None else in_nodes
out_nodes = [] if out_nodes is None else out_nodes
self.in_nodes = OrderedDict(in_nodes)
self.out_nodes = OrderedDict(out_nodes)
self._attrs = OrderedDict()
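# A minimal construction sketch (illustrative; `image`, `label` and `avg_cost`
# are assumed variables of an existing fluid program, not defined here):
#
#   graph = GraphWrapper(
#       program=fluid.default_main_program(),
#       in_nodes={'image': image.name, 'label': label.name},
#       out_nodes={'loss': avg_cost.name})
#   print(graph.flops())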
def all_parameters(self):
"""
Get all the parameters in this graph.
Returns:
list<VarWrapper>: A list of VarWrapper instances.
"""
params = []
for block in self.program.blocks:
for param in block.all_parameters():
params.append(VarWrapper(param, self))
return params
def is_parameter(self, var):
"""
Whether the given variable is parameter.
Args:
var(VarWrapper): The given variable.
"""
return isinstance(var._var, Parameter)
def is_persistable(self, var):
"""
Whether the given variable is persistable.
Args:
var(VarWrapper): The given variable.
"""
return var._var.persistable
def compile(self, for_parallel=True, for_test=False, mem_opt=False):
"""
Compile the program in this wrapper to a framework.CompiledProgram for later runs.
This function must be called if the program is modified.
Args:
for_parallel(bool): Whether to run the program in a data-parallel way. default: True.
for_test(bool): Whether the compiled program is used for test.
mem_opt(bool): Whether to enable inplace and memory optimization in the build strategy. default: False.
"""
target = self.program
if for_test:
loss = None
else:
loss = self.out_nodes['loss']
if for_parallel:
# disable memory optimize for stable training
build_strategy = compiler.BuildStrategy()
build_strategy.enable_inplace = mem_opt
build_strategy.memory_optimize = mem_opt
build_strategy.fuse_all_reduce_ops = False
# build_strategy.async_mode = False
self.compiled_graph = compiler.CompiledProgram(
target).with_data_parallel(
loss_name=loss, build_strategy=build_strategy)
else:
self.compiled_graph = compiler.CompiledProgram(target)
def ops(self):
"""
Return all operator nodes included in the graph as a list.
"""
ops = []
for block in self.program.blocks:
for op in block.ops:
ops.append(OpWrapper(op, self))
return ops
def vars(self):
"""
Get all the variables.
"""
return [VarWrapper(var, self) for var in self.program.list_vars()]
def var(self, name):
"""
Get the variable by variable name.
"""
return VarWrapper(self.program.global_block().var(name), self)
def clone(self, for_test=False):
"""
Clone a new graph from current graph.
Returns:
(GraphWrapper): The wrapper of a new graph.
"""
return GraphWrapper(
self.program.clone(for_test),
copy.deepcopy(self.in_nodes), copy.deepcopy(self.out_nodes))
def merge(self, graph):
"""
Merge a graph into current graph.
Args:
graph(GraphWrapper): The graph to be merged by current graph.
"""
for var in graph.program.list_vars():
if var.persistable:
self.teacher_persistables[var.name] = var
new_var = self.program.global_block()._clone_variable(
var, force_persistable=False)
new_var.stop_gradient = var.stop_gradient
# TODO: parameters should be cloned
for op in graph.ops():
op = op._op
inputs = {}
outputs = {}
attrs = {}
for input_name in op.input_names:
inputs[input_name] = [
self.var(in_var_name)._var
for in_var_name in op.input(input_name)
]
for output_name in op.output_names:
outputs[output_name] = [
self.var(out_var_name)._var
for out_var_name in op.output(output_name)
]
for attr_name in op.attr_names:
attrs[attr_name] = op.attr(attr_name)
self.program.global_block().append_op(
type=op.type, inputs=inputs, outputs=outputs, attrs=attrs)
def program(self):
"""
Get the program in current wrapper.
"""
return self.program
def pre_ops(self, op):
"""
Get all the previous operators of target operator.
Args:
op(OpWrapper): Target operator.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for p in self.ops():
for in_var in op.all_inputs():
if in_var in p.all_outputs():
ops.append(p)
return ops
def next_ops(self, op):
"""
Get all the next operators of target operator.
Args:
op(OpWrapper): Target operator.
Returns:
list<OpWrapper>: A list of operators.
"""
ops = []
for p in self.ops():
for out_var in op.all_outputs():
if out_var in p.all_inputs():
ops.append(p)
return ops
def get_param_by_op(self, op):
"""
Get the parameters used by target operator.
"""
assert isinstance(op, OpWrapper)
params = []
for var in op.all_inputs():
if isinstance(var._var, Parameter):
params.append(var)
assert len(params) > 0
return params
def numel_params(self):
"""
Get the number of elements in all parameters.
"""
ret = 0
for param in self.all_parameters():
ret += np.product(param.shape())
return ret
def get_optimize_graph(self, optimizer, place, scope, no_grad_var_names=[]):
"""
Get a new graph for training by appending some backward operators and optimization operators.
Args:
optimizer: The optimizer used to generate the training graph.
place: The place to run the graph.
scope: The scope used to run the graph. Some new variable will be added into this scope.
no_grad_var_names(list<str>): Names of variables that should be ignored while computing gradients. default: [].
Returns:
(GraphWrapper): The wrapper of new graph with backward ops and optimization ops.
"""
graph = self.clone()
startup_program = Program()
with program_guard(
main_program=graph.program, startup_program=startup_program):
target_name = None
if 'loss' in graph.out_nodes:
target_name = graph.out_nodes['loss']
elif 'cost' in graph.out_nodes:
target_name = graph.out_nodes['cost']
else:
return None
target = graph.var(target_name)._var
# The learning rate variable may be created in another program.
# Update information in the optimizer to make the learning
# rate variable accessible in the current program.
if isinstance(optimizer._learning_rate, Variable):
optimizer._learning_rate_map[
graph.program] = optimizer._learning_rate
optimizer.minimize(target, no_grad_set=no_grad_var_names)
exe = Executor(place)
exe.run(program=startup_program, scope=scope)
return graph
def flops(self, only_conv=False):
"""
Get the flops of current graph.
Args:
only_conv: Only calculating the conv layers. default: False.
Returns:
int: The flops of current graph.
"""
flops = 0
for op in self.ops():
if op.type() in ['conv2d', 'depthwise_conv2d']:
filter_shape = op.inputs("Filter")[0].shape()
input_shape = op.inputs("Input")[0].shape()
output_shape = op.outputs("Output")[0].shape()
c_out, c_in, k_h, k_w = filter_shape
_, _, h_out, w_out = output_shape
groups = op.attr("groups")
kernel_ops = k_h * k_w * (c_in / groups)
if len(op.inputs("Bias")) > 0:
with_bias = 1
else:
with_bias = 0
flops += 2 * h_out * w_out * c_out * (kernel_ops + with_bias)
elif op.type() == 'pool2d' and not only_conv:
input_shape = op.inputs("X")[0].shape()
output_shape = op.outputs("Out")[0].shape()
_, c_out, h_out, w_out = output_shape
k_size = op.attr("ksize")
flops += h_out * w_out * c_out * (k_size[0]**2)
elif op.type() == 'mul' and not only_conv:
x_shape = list(op.inputs("X")[0].shape())
y_shape = op.inputs("Y")[0].shape()
if x_shape[0] == -1:
x_shape[0] = 1
flops += 2 * x_shape[0] * x_shape[1] * y_shape[1]
elif op.type() in ['relu', 'sigmoid', 'batch_norm'
] and not only_conv:
input_shape = list(op.inputs("X")[0].shape())
if input_shape[0] == -1:
input_shape[0] = 1
flops += np.product(input_shape)
return flops
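# Worked example for the conv branch above (illustrative): a 3x3 convolution
# with c_in=64, c_out=128, groups=1, a bias input and a 56x56 output map
# contributes 2 * 56 * 56 * 128 * (3 * 3 * 64 + 1) ~= 4.63e8 FLOPs.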
def save_model(self, path, exe):
"""
Save the network and parameters into files that can be loaded by the load_inference_model API.
Args:
path(str): The path to save the persistables.
exe(framework.Executor): The executor used to save the persistables.
"""
out_vars = [
self.var(var_name)._var for var_name in self.out_nodes.values()
]
in_vars = list(self.in_nodes.values())
assert (len(in_vars) > 0)
assert (len(out_vars) > 0)
io.save_inference_model(
path,
in_vars,
out_vars,
exe.exe,
model_filename="__model__",
params_filename="__params__",
main_program=self.program.clone(),
export_for_deployment=True)
def save_infer_model(self, path, exe, in_out, program_only=False):
"""
Save the network and parameters into files that can be loaded by the load_inference_model API.
Args:
path(str): The path to save the persistables.
exe(framework.Executor): The executor used to save the persistables.
in_out(tuple|list): in_out[0] is a list of input nodes' names
and in_out[1] is a list of output nodes' names.
program_only(bool): Whether to save program only.
"""
out_vars = [self.var(var_name)._var for var_name in in_out[1]]
in_vars = list(in_out[0])
assert (len(in_vars) > 0)
assert (len(out_vars) > 0)
io.save_inference_model(
path,
in_vars,
out_vars,
exe.exe,
model_filename="__model__.infer",
params_filename="__params__",
program_only=program_only,
main_program=self.program.clone(),
export_for_deployment=True)
def save_persistables(self, path, exe):
"""
Save all the persistable variables into file.
Args:
path(str): The path to save the persistables.
exe(framework.Executor): The executor used to save the persistables.
"""
# update persistables from program
for var in self.program.list_vars():
if var.persistable and var.name not in self.persistables:
self.persistables[var.name] = var
persistables = []
for var in self.persistables:
if 'reader' not in var and 'double_buffer' not in var and var not in self.teacher_persistables:
persistables.append(self.persistables[var])
io.save_vars(exe.exe, path, vars=persistables)
def load_persistables(self, path, exe):
"""
Load the persistable variables from file.
Args:
path(str): The path to load the persistables.
exe(framework.Executor): The executor used to load the persistables.
"""
def if_exist(var):
return os.path.exists(os.path.join(path, var.name))
persistables = []
for var in self.persistables:
if 'reader' not in var and 'double_buffer' not in var:
persistables.append(self.persistables[var])
io.load_vars(exe.exe, path, vars=persistables, predicate=if_exist)
def update_param_shape(self, scope):
"""
Update the shape of parameters in the graph according to tensors in scope.
It is used after loading pruned parameters from file.
"""
for param in self.all_parameters():
tensor_shape = np.array(scope.find_var(param.name()).get_tensor(
)).shape
param.set_shape(tensor_shape)
def infer_shape(self):
"""
Re-infer the output shapes of all operators according to their current inputs.
It is used after loading pruned parameters from file.
"""
for op in self.ops():
if op.type() != 'conditional_block':
op._op.desc.infer_shape(op._op.block.desc)
def update_groups_of_conv(self):
for op in self.ops():
if op.type() == 'depthwise_conv2d' or op.type(
) == 'depthwise_conv2d_grad':
op.set_attr('groups', op.inputs('Filter')[0].shape()[0])
|
|
'''
DotDesktop
======================================
Simple app for creating and editing '.desktop' files.
====================
MIT License
====================
Dependencies
===================
python 2.7
kivy 1.9.0
Author
====================
2015 - Alberto Sola
'''
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
from kivy.config import Config
from kivy.lang import Builder
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
from kivy.uix.textinput import TextInput
import os
from os.path import expanduser
import stat
# Personalize widgets
TextInput.cursor_color = (0,0,0,1)
# Make the window non-resizable
Config.set('graphics', 'resizable', 0)
Config.write()
# Load dialogs gui
Builder.load_file("views/dialogs.kv")
Builder.load_file("views/dotdesktop.kv")
# Window size
window_width = 540
window_height = 250
popup_width = 500
popup_height = window_width
class AlertPopup(Popup):
'''
AlertPopup
==============================================================
This class shows a Popup.
The Popup can display:
- Alert. It only displays a message and a cancel button.
- Prompt. If you pass a function to "on_accept" it will
display a second button, "OK" that will execute
the callback.
- Both can display an extra message if you pass a string
to "msg".
@param string title
@param string msg [ optional ]
@param function on_accept [ optional ]
'''
def __init__(self, title, msg = '', on_accept = None, **kwargs ):
super(AlertPopup, self).__init__( **kwargs )
# Define object vars
self.title = title
self.size_hint = (0.5,0.6)
self.accept_callback = on_accept
# On dismiss
#self.bind( on_dismiss = self.close )
# Principal layout
layout = BoxLayout(orientation='vertical', padding=1)
# Close button
btn_close = Button( text='Close' )
btn_close.bind(on_release=self.close )
# Check if there is a message
if msg != '':
label = Label(text=msg)
layout.add_widget(label)
else:
layout.size_hint = (0.9, 0.9)
# Check if it's a prompt
if self.accept_callback is not None:
button_layout = BoxLayout(spacing=1)
btn_accept = Button(text='Ok')
btn_accept.bind(on_release=self.accept)
button_layout.add_widget(btn_accept)
button_layout.add_widget(btn_close)
layout.add_widget(button_layout)
else:
layout.add_widget(btn_close)
# Create Popup
self.add_widget(layout)
self.open()
def close( self, instance ):
'''
Closes the popup.
'''
self.dismiss()
def accept(self, instance):
'''
Closes the popup and calls the callback.
'''
self.close(instance)
self.accept_callback(instance)
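# Usage sketch, matching the calls made later in this file: a plain alert shows
# only a Close button, while passing on_accept turns it into an Ok/Close prompt.
#
#   AlertPopup("File saved!")                                 # alert
#   AlertPopup("Caution", "Overwrite file?", self.save_file)  # prompt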
class OpenFileDialog( BoxLayout ):
'''
Widget that contains a FileChooser where
the user can select a file.
- open_file: function callback when press the open button.
- close_dialog: function callback that closes the popup.
'''
# Open button action
open_file = ObjectProperty( None )
# Close button action
close_dialog = ObjectProperty( None )
class DotDesktop(BoxLayout):
'''
//////////////////////////
// //
// Main Screen Widget //
// //
//////////////////////////
'''
# Get TextInput widgets from .kv
txt_input_name = ObjectProperty(None)
txt_input_comment = ObjectProperty(None)
txt_input_path = ObjectProperty(None)
txt_input_exec = ObjectProperty(None)
txt_input_icon = ObjectProperty(None)
def default_size(self, instance = ObjectProperty(None)):
# Initial window size
Window.size = (window_width, window_height)
def close_popup(self, instance = ObjectProperty(None)):
# Closes popup and restores size
self.default_size()
self.popup.dismiss()
def show_popup(self, title, content):
# Create a popup that contains the view
self.popup = Popup(title=title, content=content,
size_hint=(0.9, 0.9))
# On dismiss
self.popup.bind( on_dismiss = self.default_size )
# Show the popup
self.popup.open()
# Resize the window
Window.size = (popup_height,popup_width)
def open_file( self ):
# Open file button action
# Load the FileChooser inside a Popup
content = OpenFileDialog( open_file=self.read_file ,close_dialog = self.close_popup )
# Get the user dir
content.ids.open_file_chooser.path = expanduser('~')
self.show_popup( "Open file", content )
def read_file( self, files ):
# Read the .desktop file
# Open file
file_txt = open( files[0] )
# Parse
file_dic = {}
for line in file_txt:
# Get a new line and split
line_part = line.split("=")
# If there are two parts
if len(line_part) == 2:
# Add new key to the dict.
# You can access every '.desktop' property with
# its name in lowercase and no spaces.
file_dic[ line_part[0].replace(" ", "").lower() ] = line_part[1]
# Set TextInput text
self.txt_input_path.text = files[0]
self.txt_input_name.text = file_dic.get("name","")
self.txt_input_comment.text = file_dic.get("comment","")
self.txt_input_exec.text = file_dic.get("exec","")
self.txt_input_icon.text = file_dic.get("icon","")
# Close file and Popup
file_txt.close()
self.close_popup()
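# Parsing example (illustrative): a line such as "Name=My App\n" is split on
# "=" into ("Name", "My App\n") and stored as file_dic['name'] == 'My App\n';
# the trailing newline is only stripped later, when save_file() writes the
# fields back out with .replace('\n', "").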
def select_file(self, text_input ):
# Open FileChooser and set TextInput text
self.text_input = text_input
# Load the FileChooser inside a Popup
content = OpenFileDialog( open_file=self.set_textinput, close_dialog = self.close_popup )
# Get the user dir
content.ids.open_file_chooser.path = expanduser('~')
# Show Popup
self.show_popup( "Select file", content )
def set_textinput(self, files):
# Set TextInput text from FileChooser
self.text_input.text = files[0]
self.close_popup()
def check_before_save(self, instance):
# Save the file
# Set file path
self.file_path = self.txt_input_path.text
# Check if file path is empty
if self.file_path == '':
AlertPopup("File path empty!")
# Check if file exists
elif os.path.isfile( self.file_path ):
AlertPopup("Caution","Overwrite file?", self.save_file )
# Save file
else:
self.save_file( instance )
def save_file( self, instance ):
# Write file data
try:
output_file = open( self.file_path, 'w' )
output_file.write("[Desktop Entry]\n")
output_file.write("Type=Application\n")
output_file.write("Name=" + self.txt_input_name.text.replace('\n', "") + '\n' )
output_file.write("Comment=" + self.txt_input_comment.text.replace('\n', "") + '\n' )
output_file.write("Icon=" + self.txt_input_icon.text.replace('\n', "") + '\n' )
output_file.write("Exec=" + self.txt_input_exec.text.replace('\n', "") + '\n')
output_file.close()
# Set execute permissions
file_mode = os.stat( self.file_path )
os.chmod( self.file_path, file_mode.st_mode | stat.S_IEXEC )
#self.show_popup( "File saved!", AlertPopup() )
AlertPopup("File saved!")
# Manage exceptions
except IOError:
AlertPopup("Error while saving", "File can't be saved.")
# Main class
class DotDesktopApp(App):
def build(self):
# Window size
Window.size = (window_width, window_height)
# Window background color
Window.clearcolor = (0.20,0.20,0.20,1)
# Run ScreenManager
return DotDesktop()
if __name__ == '__main__':
# Run app
DotDesktopApp().run()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from keystone.common import extension
from keystone.common import json_home
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json'
MEDIA_TYPE_XML = 'application/vnd.openstack.identity-%s+xml'
_VERSIONS = []
# NOTE(blk-u): latest_app will be set by keystone.service.loadapp(). It gets
# set to the application that was just loaded. In the case of keystone-all,
# loadapp() gets called twice, once for the public app and once for the admin
# app. In the case of httpd/keystone, loadapp() gets called once for the public
# app if this is the public instance or loadapp() gets called for the admin app
# if it's the admin instance.
# This is used to fetch the /v3 JSON Home response. The /v3 JSON Home response
# is the same whether it's the admin or public service so either admin or
# public works.
latest_app = None
def request_v3_json_home(new_prefix):
if 'v3' not in _VERSIONS:
# No V3 support, so return an empty JSON Home document.
return {'resources': {}}
req = webob.Request.blank(
'/v3', headers={'Accept': 'application/json-home'})
v3_json_home_str = req.get_response(latest_app).body
v3_json_home = jsonutils.loads(v3_json_home_str)
json_home.translate_urls(v3_json_home, new_prefix)
return v3_json_home
class Extensions(wsgi.Application):
"""Base extensions controller to be extended by public and admin API's."""
# extend in subclass to specify the set of extensions
@property
def extensions(self):
return None
def get_extensions_info(self, context):
return {'extensions': {'values': self.extensions.values()}}
def get_extension_info(self, context, extension_alias):
try:
return {'extension': self.extensions[extension_alias]}
except KeyError:
raise exception.NotFound(target=extension_alias)
class AdminExtensions(Extensions):
@property
def extensions(self):
return extension.ADMIN_EXTENSIONS
class PublicExtensions(Extensions):
@property
def extensions(self):
return extension.PUBLIC_EXTENSIONS
def register_version(version):
_VERSIONS.append(version)
class MimeTypes:
JSON = 'application/json'
JSON_HOME = 'application/json-home'
def v3_mime_type_best_match(context):
# accept_header is a WebOb MIMEAccept object so supports best_match.
accept_header = context['accept_header']
if not accept_header:
return MimeTypes.JSON
SUPPORTED_TYPES = [MimeTypes.JSON, MimeTypes.JSON_HOME]
return accept_header.best_match(SUPPORTED_TYPES)
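# Behaviour sketch of the negotiation above (illustrative): a request carrying
# "Accept: application/json-home" best-matches MimeTypes.JSON_HOME, so
# get_versions()/get_version_v3() answer with the JSON Home document; any other
# Accept header (or none at all) falls through to the plain JSON version list.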
class Version(wsgi.Application):
def __init__(self, version_type, routers=None):
self.endpoint_url_type = version_type
self._routers = routers
super(Version, self).__init__()
def _get_identity_url(self, context, version):
"""Returns a URL to keystone's own endpoint."""
url = self.base_url(context, self.endpoint_url_type)
return '%s/%s/' % (url, version)
def _get_versions_list(self, context):
"""The list of versions is dependent on the context."""
versions = {}
if 'v2.0' in _VERSIONS:
versions['v2.0'] = {
'id': 'v2.0',
'status': 'stable',
'updated': '2014-04-17T00:00:00Z',
'links': [
{
'rel': 'self',
'href': self._get_identity_url(context, 'v2.0'),
}, {
'rel': 'describedby',
'type': 'text/html',
'href': 'http://docs.openstack.org/'
}
],
'media-types': [
{
'base': 'application/json',
'type': MEDIA_TYPE_JSON % 'v2.0'
}, {
'base': 'application/xml',
'type': MEDIA_TYPE_XML % 'v2.0'
}
]
}
if 'v3' in _VERSIONS:
versions['v3'] = {
'id': 'v3.0',
'status': 'stable',
'updated': '2013-03-06T00:00:00Z',
'links': [
{
'rel': 'self',
'href': self._get_identity_url(context, 'v3'),
}
],
'media-types': [
{
'base': 'application/json',
'type': MEDIA_TYPE_JSON % 'v3'
}, {
'base': 'application/xml',
'type': MEDIA_TYPE_XML % 'v3'
}
]
}
return versions
def get_versions(self, context):
req_mime_type = v3_mime_type_best_match(context)
if req_mime_type == MimeTypes.JSON_HOME:
v3_json_home = request_v3_json_home('/v3')
return wsgi.render_response(
body=v3_json_home,
headers=(('Content-Type', MimeTypes.JSON_HOME),))
versions = self._get_versions_list(context)
return wsgi.render_response(status=(300, 'Multiple Choices'), body={
'versions': {
'values': versions.values()
}
})
def get_version_v2(self, context):
versions = self._get_versions_list(context)
if 'v2.0' in _VERSIONS:
return wsgi.render_response(body={
'version': versions['v2.0']
})
else:
raise exception.VersionNotFound(version='v2.0')
def _get_json_home_v3(self):
def all_resources():
for router in self._routers:
for resource in router.v3_resources:
yield resource
return {
'resources': dict(all_resources())
}
def get_version_v3(self, context):
versions = self._get_versions_list(context)
if 'v3' in _VERSIONS:
req_mime_type = v3_mime_type_best_match(context)
if req_mime_type == MimeTypes.JSON_HOME:
return wsgi.render_response(
body=self._get_json_home_v3(),
headers=(('Content-Type', MimeTypes.JSON_HOME),))
return wsgi.render_response(body={
'version': versions['v3']
})
else:
raise exception.VersionNotFound(version='v3')
|
|
#!/usr/bin/env python
"""
# =============================================================================
Copyright Government of Canada 2015-2016
Written by: Eric Marinier, Public Health Agency of Canada,
National Microbiology Laboratory
Funded by the National Microbiology Laboratory and the Genome Canada / Alberta
Innovates Bio Solutions project "Listeria Detection and Surveillance
using Next Generation Genomics"
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# =============================================================================
"""
import drmaa
import os
import sys
import StringIO
from TestingUtility import *
prepareSystemPath()
from neptune.JobManager import *
from neptune.JobManagerDRMAA import JobManagerDRMAA
from neptune.JobManagerParallel import JobManagerParallel
import neptune.CountKMers as CountKMers
import neptune.AggregateKMers as AggregateKMers
import neptune.ExtractSignatures as ExtractSignatures
import neptune.FilterSignatures as FilterSignatures
import neptune.Utility as Utility
import unittest
class TestDRMAAConstructor(unittest.TestCase):
def test_no_defaults(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
defaultSpecification = None
jobManager = JobManagerDRMAA(
outputDirectoryLocation, logDirectoryLocation, session, defaultSpecification)
self.assertEquals(jobManager.session, session)
self.assertEquals(jobManager.outputDirectoryLocation, outputDirectoryLocation)
self.assertEquals(jobManager.logDirectoryLocation, logDirectoryLocation)
self.assertEquals(jobManager.countSpecification, defaultSpecification)
self.assertEquals(jobManager.aggregateSpecification, defaultSpecification)
self.assertEquals(jobManager.extractSpecification, defaultSpecification)
self.assertEquals(jobManager.databaseSpecification, defaultSpecification)
self.assertEquals(jobManager.filterSpecification, defaultSpecification)
def test_defaults(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
defaultSpecification = "-l h_vmem=16G -pe smp 8"
jobManager = JobManagerDRMAA(
outputDirectoryLocation, logDirectoryLocation, session, defaultSpecification)
self.assertEquals(jobManager.session, session)
self.assertEquals(jobManager.outputDirectoryLocation, outputDirectoryLocation)
self.assertEquals(jobManager.countSpecification, defaultSpecification)
self.assertEquals(jobManager.aggregateSpecification, defaultSpecification)
self.assertEquals(jobManager.extractSpecification, defaultSpecification)
self.assertEquals(jobManager.databaseSpecification, defaultSpecification)
self.assertEquals(jobManager.filterSpecification, defaultSpecification)
class TestParallelConstructor(unittest.TestCase):
def test_simple(self):
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
jobManager = JobManagerParallel(outputDirectoryLocation, logDirectoryLocation)
self.assertEquals(jobManager.outputDirectoryLocation, outputDirectoryLocation)
self.assertEquals(jobManager.logDirectoryLocation, logDirectoryLocation)
class TestSetCount(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.countSpecification, None)
jobManager.setCountSpecification(specification)
self.assertEquals(jobManager.countSpecification, specification)
class TestSetAggregate(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.aggregateSpecification, None)
jobManager.setAggregateSpecification(specification)
self.assertEquals(jobManager.aggregateSpecification, specification)
class TestSetExtract(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.extractSpecification, None)
jobManager.setExtractSpecification(specification)
self.assertEquals(jobManager.extractSpecification, specification)
class TestSetDatabase(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.databaseSpecification, None)
jobManager.setDatabaseSpecification(specification)
self.assertEquals(jobManager.databaseSpecification, specification)
class TestSetFilter(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.filterSpecification, None)
jobManager.setFilterSpecification(specification)
self.assertEquals(jobManager.filterSpecification, specification)
class TestSetConsolidate(unittest.TestCase):
def test_simple(self):
specification = "-l h_vmem=16G -pe smp 8"
with drmaa.Session() as session:
jobManager = JobManagerDRMAA(getPath("tests/output/manager"), getPath("tests/output/manager/log"), session, None)
self.assertEquals(jobManager.consolidateSpecification, None)
jobManager.setConsolidateSpecification(specification)
self.assertEquals(jobManager.consolidateSpecification, specification)
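# TestRunJobs exercises the parallel manager end-to-end: it builds a real
# CountKMers job, runs it, and checks the k-mer counts written to disk.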
class TestRunJobs(unittest.TestCase):
def test_simple(self):
outputDirectoryLocation = getPath("tests/output/manager/output")
logDirectoryLocation = getPath("tests/output/manager/log")
jobManager = JobManagerParallel(outputDirectoryLocation, logDirectoryLocation)
inputLocation = getPath("tests/data/manager/simple.fasta")
outputLocation = getPath("tests/output/manager/temp.out")
k = 7
organization = 0
job = jobManager.createCountJob(inputLocation, outputLocation, k, organization)
jobManager.runJobs([job])
with open(outputLocation, "r") as myfile:
result = myfile.read()
expected = "ACGTACG 4\nGTACGTA 2\n"
self.assertEquals(result, expected)
os.remove(outputLocation)
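# The remaining tests inspect the DRMAA job templates produced by the
# create*Job methods: output/error log paths, command-line arguments, and
# the native specification handed to the scheduler.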
class TestCreateJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
defaultSpecification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, defaultSpecification)
job = jobManager.createJob()
self.assertTrue(job)
class TestCreateCountJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output")
logDirectoryLocation = getPath("tests/output/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setCountSpecification(specification)
inputLocation = "tests/data/manager/simple.fasta"
outputLocation = getPath("tests/output/manager/temp.out")
k = 7
organization = 0
job = jobManager.createCountJob(inputLocation, outputLocation, k, organization)
args = [
CountKMers.INPUT_LONG, str(inputLocation),
CountKMers.OUTPUT_LONG, str(outputLocation),
CountKMers.KMER_LONG, str(k),
CountKMers.ORGANIZATION_LONG, str(organization)]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-CountKMers1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-CountKMers1.e"))
self.assertEquals(job.args[1:], args)
self.assertEquals(job.nativeSpecification, specification)
class TestCreateAggregateJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setAggregateSpecification(specification)
inclusionLocations = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
exclusionLocations = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
outputLocation = getPath("tests/output/manager/temp.out")
tag = "A"
job = jobManager.createAggregateJob(inclusionLocations, exclusionLocations, outputLocation, tag)
args = [
AggregateKMers.INCLUSION_LONG, "tests/data/manager/simple.fasta" + "." + tag, "tests/data/manager/alternative.fasta" + "." + tag,
AggregateKMers.EXCLUSION_LONG, "tests/data/manager/simple.fasta" + "." + tag, "tests/data/manager/alternative.fasta" + "." + tag,
AggregateKMers.OUTPUT_LONG, outputLocation,
AggregateKMers.DELETE_LONG]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-AggregateKMers1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-AggregateKMers1.e"))
self.assertEquals(job.args[1:], args)
self.assertEquals(job.nativeSpecification, specification)
def test_no_tag(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setAggregateSpecification(specification)
inclusionLocations = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
exclusionLocations = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
outputLocation = getPath("tests/output/manager/temp.out")
tag = None
job = jobManager.createAggregateJob(inclusionLocations, exclusionLocations, outputLocation, tag)
args = [
AggregateKMers.INCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
AggregateKMers.EXCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
AggregateKMers.OUTPUT_LONG, outputLocation,
AggregateKMers.DELETE_LONG]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-AggregateKMers1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-AggregateKMers1.e"))
self.assertEquals(job.args[1:], args)
self.assertEquals(job.nativeSpecification, specification)
class TestCreateExtractJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setExtractSpecification(specification)
referenceLocation = "tests/data/manager/simple.fasta"
referenceSize = 12
rate = 0.01
inclusion = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
inhits = 2
exclusion = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
exhits = 2
gap = 3
size = 5
GC = 0.5
confidence = 0.95
aggregateLocation = "tests/data/manager/simple.kmers"
outputLocation = getPath("tests/output/manager/temp.out")
job = jobManager.createExtractJob(referenceLocation, referenceSize, rate, inclusion, inhits,
exclusion, exhits, gap, size, GC, confidence, aggregateLocation, outputLocation)
args = [
ExtractSignatures.REFERENCE_LONG, str(referenceLocation),
ExtractSignatures.REFERENCE_SIZE_LONG, str(referenceSize),
ExtractSignatures.RATE_LONG, str(rate),
ExtractSignatures.INCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
ExtractSignatures.INHITS_LONG, str(inhits),
ExtractSignatures.EXCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
ExtractSignatures.EXHITS_LONG, str(exhits),
ExtractSignatures.GAP_LONG, str(gap),
ExtractSignatures.SIZE_LONG, str(size),
ExtractSignatures.GC_LONG, str(GC),
ExtractSignatures.CONFIDENCE_LONG, str(confidence),
ExtractSignatures.KMERS_LONG, aggregateLocation,
ExtractSignatures.OUTPUT_LONG, outputLocation]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-ExtractSignatures1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-ExtractSignatures1.e"))
self.assertEquals(job.args[1:], args)
self.assertEquals(job.nativeSpecification, specification)
class TestCreateDatabaseJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setDatabaseSpecification(specification)
inputLocations = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
aggregatedLocation = getPath("tests/output/manager/aggregated.out")
outputLocation = getPath("tests/output/manager/temp.out")
job = jobManager.createDatabaseJob(inputLocations, aggregatedLocation, outputLocation)
args = [
"-dbtype", "nucl",
"-in", aggregatedLocation,
"-title", "DATABASE",
"-out", outputLocation]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-CreateDatabase1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-CreateDatabase1.e"))
self.assertEquals(job.args, args)
self.assertEquals(job.nativeSpecification, specification)
class TestCreateFilterJob(unittest.TestCase):
def test_simple(self):
with drmaa.Session() as session:
outputDirectoryLocation = getPath("tests/output/manager")
logDirectoryLocation = getPath("tests/output/manager/log")
specification = "-l h_vmem=2G -pe smp 1"
jobManager = JobManagerDRMAA(outputDirectoryLocation, logDirectoryLocation, session, None)
jobManager.setFilterSpecification(specification)
inclusionDatabaseLocation = "tests/data/manager/FAKE_IN_DB.FAKE"
exclusionDatabaseLocation = "tests/data/manager/FAKE_EX_DB.FAKE"
inclusion = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
exclusion = ["tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta"]
inputLocation = "tests/data/manager/simple.fasta"
filteredOutputLocation = getPath("tests/output/manager/simple.filtered")
sortedOutputLocation = getPath("tests/output/manager/simple.sorted")
filterLength = 0.5
filterPercent = 0.5
seedSize = 11
job = jobManager.createFilterJob(inclusionDatabaseLocation, exclusionDatabaseLocation,
inclusion, exclusion, inputLocation, filteredOutputLocation, sortedOutputLocation,
filterLength, filterPercent, seedSize)
args = [
FilterSignatures.INCLUSION_DATABASE_LONG, str(inclusionDatabaseLocation),
FilterSignatures.EXCLUSION_DATABASE_LONG, str(exclusionDatabaseLocation),
FilterSignatures.INCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
FilterSignatures.EXCLUSION_LONG, "tests/data/manager/simple.fasta", "tests/data/manager/alternative.fasta",
FilterSignatures.INPUT_LONG, str(inputLocation),
FilterSignatures.FILTERED_OUTPUT_LONG, str(filteredOutputLocation),
FilterSignatures.SORTED_OUTPUT_LONG, str(sortedOutputLocation),
FilterSignatures.FILTER_LENGTH_LONG, str(filterLength),
FilterSignatures.FILTER_PERCENT_LONG, str(filterPercent),
FilterSignatures.SEED_SIZE_LONG, str(seedSize)]
self.assertEquals(job.outputPath, ":" + os.path.join(logDirectoryLocation, "Neptune-FilterSignatures1.o"))
self.assertEquals(job.errorPath, ":" + os.path.join(logDirectoryLocation, "Neptune-FilterSignatures1.e"))
self.assertEquals(job.args[1:], args)
self.assertEquals(job.nativeSpecification, specification)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates import galactocentric_frame_defaults
from astropy.coordinates.distances import Distance
from astropy.coordinates.builtin_frames import (
ICRS, FK5, FK4, FK4NoETerms, Galactic, CIRS,
Supergalactic, Galactocentric, HCRS, GCRS, LSR)
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates import EarthLocation, CartesianRepresentation
from astropy.time import Time
from astropy.units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650),
(10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
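# m31_params enumerates each unordered pair of frames (i < j) together with
# the corresponding M31 coordinates, so every frame-to-frame conversion is
# exercised exactly once by the parametrized test below.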
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys())
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950')))
assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
else:
assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert (coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys())
assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
assert (coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time('J2000')
b1950 = Time('B1950')
j1975 = Time('J1975')
b1975 = Time('B1975')
fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK4(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1*u.deg, dec=2*u.deg)
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.e-10
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4NoETerms()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg,
dec=np.linspace(-90, 90, 10)*u.deg,
distance=1.*u.kpc)
g_xyz = icrs_coord.transform_to(Galactic()).cartesian.xyz
with galactocentric_frame_defaults.set('pre-v4.0'):
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
# generate some test coordinates
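# With the Sun placed 1 kpc from the Galactic centre and z_sun = 0, a source
# sqrt(2) kpc away at (l=0, b=-45) should sit 1 kpc directly below the centre,
# at (l=0, b=+45) 1 kpc above it, and at (l=45/315, b=0) 1 kpc to either side,
# which is what true_xyz encodes below.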
g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg,
distance=[np.sqrt(2)]*4*u.kpc)
with galactocentric_frame_defaults.set('pre-v4.0'):
xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10., 10., 100) * u.kpc
y = np.linspace(-10., 10., 100) * u.kpc
z = np.zeros_like(x)
# from Galactic to Galactocentric
l = np.linspace(15, 30., 100) * u.deg
b = np.linspace(-10., 10., 100) * u.deg
d = np.ones_like(l.value) * u.kpc
with galactocentric_frame_defaults.set('latest'):
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1),
z=z.reshape(100, 1, 1))
g1t = g1.transform_to(Galactic())
g2t = g2.transform_to(Galactic())
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1))
g1t = g1.transform_to(Galactocentric())
g2t = g2.transform_to(Galactocentric())
np.testing.assert_almost_equal(g1t.cartesian.xyz.value,
g2t.cartesian.xyz.value[:, :, 0, 0])
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
assert allclose(npole.transform_to(Supergalactic()).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree)
lon0_gal = lon0.transform_to(Galactic())
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (https://ui.adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree)
icrs = SkyCoord('18h50m27s +31d57m17s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree)
icrs = SkyCoord('17h51m36s -25d18m52s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS():
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg,
dec=-22.36943723*u.deg,
distance=406615.66347377*u.km)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg,
dec=[-22.36943605, -25.07431079]*u.deg,
distance=[406615.66347377, 375484.13558956]*u.km)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km),
obstime=self.t1)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5*u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
class TestHelioBaryCentric():
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup(self):
wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check it doesn't change from previous times.
previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz -
helio_slalib)**2).sum()) < 14. * u.km
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with SLALIB answer to within 14km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz -
bary_slalib)**2).sum()) < 14. * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
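# An object at rest with respect to the ICRS/barycentre should carry the full
# solar peculiar velocity (+v_bary) when expressed in the LSR frame; the second
# half of the test checks the reverse direction (zero LSR velocity transforms
# to -v_bary in ICRS).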
icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
lsr = icrs.transform_to(LSR())
lsr_diff = lsr.data.differentials['s']
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()),
lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
icrs = lsr.transform_to(ICRS())
icrs_diff = icrs.data.differentials['s']
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()),
-lsr.v_bary.d_xyz)
def test_hcrs_icrs_differentials():
# Regression to ensure that we can transform velocities from HCRS to LSR.
# Numbers taken from the original issue, gh-6835.
hcrs = HCRS(ra=8.67*u.deg, dec=53.09*u.deg, distance=117*u.pc,
pm_ra_cosdec=4.8*u.mas/u.yr, pm_dec=-15.16*u.mas/u.yr,
radial_velocity=23.42*u.km/u.s)
icrs = hcrs.transform_to(ICRS())
# The position and velocity should not change much
assert allclose(hcrs.cartesian.xyz, icrs.cartesian.xyz, rtol=1e-8)
assert allclose(hcrs.velocity.d_xyz, icrs.velocity.d_xyz, rtol=1e-2)
hcrs2 = icrs.transform_to(HCRS())
# The values should round trip
assert allclose(hcrs.cartesian.xyz, hcrs2.cartesian.xyz, rtol=1e-12)
assert allclose(hcrs.velocity.d_xyz, hcrs2.velocity.d_xyz, rtol=1e-12)
def test_cirs_icrs():
"""
Test CIRS<->ICRS transformations, including self transform
"""
t = Time("J2010")
MOONDIST = 385000*u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST)
loc = EarthLocation(lat=0*u.deg, lon=0*u.deg)
cirs_geo_frame = CIRS(obstime=t)
cirs_topo_frame = CIRS(obstime=t, location=loc)
moon_geo = cirs_geo_frame.realize_frame(MOONDIST_CART)
moon_topo = moon_geo.transform_to(cirs_topo_frame)
# now check that the distance change is similar to earth radius
assert 1000*u.km < np.abs(moon_topo.distance - moon_geo.distance).to(u.au) < 7000*u.km
# now check that it round-trips
moon2 = moon_topo.transform_to(moon_geo)
assert_allclose(moon_geo.cartesian.xyz, moon2.cartesian.xyz)
# now check ICRS transform gives a decent distance from Barycentre
moon_icrs = moon_geo.transform_to(ICRS())
assert_allclose(moon_icrs.distance - 1*u.au, 0.0*u.R_sun, atol=3*u.R_sun)
|
|
import os
import sys
import time
import lcm
import functools
import threading
import glob
import json
import re
import select
import numpy as np
from ddapp import lcmspy as spy
VIDEO_LCM_URL = 'udpm://239.255.76.50:7650?ttl=1'
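# This script catalogs LCM video logs written to a directory and serves
# playback frames over a separate LCM URL in response to control messages
# (frame requests, play/resume, and log-sync against EST_ROBOT_STATE).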
class FieldData(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
keys = self.__dict__.keys()
return 'FieldData(%s)' % ', '.join(['%s=%r' % (k,v) for k, v in self.__dict__.iteritems()])
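# Return the utimes falling within the last `seconds` of the newest entry in
# utimeMap, sorted ascending, or None if the map is empty.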
def getRecentUtimes(utimeMap, seconds):
utimes = np.array(utimeMap.keys())
if not len(utimes):
return None
utimes.sort()
endTime = utimes[-1]
startTime = max(0, endTime - seconds*1e6)
startIndex = utimes.searchsorted(startTime)
utimes = utimes[startIndex:]
if not len(utimes):
return None
return utimes
class LCMPoller(object):
def __init__(self, lc):
self.lc = lc
self.poll = select.poll()
self.poll.register(self.lc.fileno())
def handleLCM(self, timeout=100):
if self.poll.poll(timeout):
self.lc.handle()
class LogLookup(object):
def __init__(self):
self.utimeMap = None
self.logs = {}
def setUtimeMap(self, utimeMap):
self.utimeMap = utimeMap
def getImage(self, utime):
filename, filepos = self.utimeMap[utime]
log = self.logs.get(filename)
if log is None:
log = lcm.EventLog(filename, 'r')
self.logs[filename] = log
log.seek(filepos)
event = log.read_next_event()
msg = spy.decodeMessage(event.data)
if hasattr(msg, 'images'):
msg = msg.images[0]
return msg, filename
def closeLogs(self):
for log in self.logs.values():
log.close()
self.logs = {}
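# PlayThread replays cataloged frames in a background thread: it maps elapsed
# wall-clock time (scaled by `speed`) onto the recorded utimes and republishes
# the matching images on VIDEO_PLAYBACK_IMAGE at roughly self.fps iterations
# per second.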
class PlayThread(object):
def __init__(self, utimes, logLookup, speed):
self.fps = 60
self.shouldStop = False
self.utimes = utimes
self.logLookup = logLookup
self.speed = speed
self.lc = lcm.LCM(VIDEO_LCM_URL)
def start(self):
self.shouldStop = False
self.thread = threading.Thread(target=self.mainLoop)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.shouldStop = True
self.thread.join()
def mainLoop(self):
startTime = time.time()
while not self.shouldStop:
elapsedUtime = int(1e6 * (time.time() - startTime)*self.speed)
utimeIndex = self.utimes.searchsorted(self.utimes[0] + elapsedUtime)
if utimeIndex == len(self.utimes):
break
utimeRequest = self.utimes[utimeIndex]
image, filename = self.logLookup.getImage(utimeRequest)
print 'elapsed: %.2f index: %d play jitter: %.3f' % (elapsedUtime*1e-6, utimeIndex, (utimeRequest - (self.utimes[0] + elapsedUtime) )*1e-6)
self.lc.publish('VIDEO_PLAYBACK_IMAGE', image.encode())
time.sleep(1.0 / self.fps)
class ServerThread(object):
def __init__(self, sharedUtimeMap):
self.sharedUtimeMap = sharedUtimeMap
self.utimes = None
self.playbackThread = None
self.syncThread = None
self.timeWindow = 60
self.logLookup = LogLookup()
self.lc = lcm.LCM(VIDEO_LCM_URL)
self.lc.subscribe('VIDEO_PLAYBACK_CONTROL', self.onControlMessage)
def start(self):
self.thread = threading.Thread(target=self.mainLoop)
self.thread.daemon = True
self.shouldStop = False
self.thread.start()
def stop(self):
self.shouldStop = True
self.thread.join()
def getUtimeIndex(self, data):
# data.value is a fraction in [0, 1]; convert it to an integer index so it
# can be used directly for numpy indexing and slicing below.
assert 0.0 <= data.value <= 1.0
return int((len(self.utimes) - 1) * data.value)
def onFrameRequest(self, data):
self.stopPlaybackThread()
if self.utimes is None:
self.logLookup.setUtimeMap(dict(self.sharedUtimeMap))
self.utimes = getRecentUtimes(self.logLookup.utimeMap, seconds=self.timeWindow)
if self.utimes is None:
print 'no utimes cataloged'
return
print 'starting review with utimes %d %d' % (self.utimes[0], self.utimes[1])
utimeIndex = self.getUtimeIndex(data)
utimeRequest = self.utimes[utimeIndex]
image, filename = self.logLookup.getImage(utimeRequest)
print 'location: %.2f index: %d utime: %d timeDelta: %.3f file: %s' % (data.value, utimeIndex, utimeRequest, (self.utimes[-1] - self.utimes[utimeIndex])*1e-6, os.path.basename(filename))
self.lc.publish('VIDEO_PLAYBACK_IMAGE', image.encode())
def onResume(self, data):
self.stopPlaybackThread()
self.utimes = None
self.logLookup.closeLogs()
return
def onPlay(self, data):
self.stopPlaybackThread()
if self.utimes is None:
print 'cannot play. no utimes available'
return
startIndex = self.getUtimeIndex(data)
playbackUtimes = self.utimes[startIndex:]
self.playbackThread = PlayThread(playbackUtimes, self.logLookup, speed=data.speed)
self.playbackThread.start()
def stopPlaybackThread(self):
if self.playbackThread:
self.playbackThread.stop()
self.playbackThread = None
if self.syncThread:
self.syncThread.stop()
self.syncThread = None
def onLogSync(self):
self.syncThread = LogSyncThread(self.sharedUtimeMap)
self.syncThread.start()
def unwrapCommand(self, msgBytes):
msg = spy.decodeMessage(msgBytes)
argDict = json.loads(msg.command)
return FieldData(**argDict)
def onControlMessage(self, channel, msgBytes):
data = self.unwrapCommand(msgBytes)
print data
if data.command == 'request_frame':
self.onFrameRequest(data)
elif data.command == 'resume':
self.onResume(data)
elif data.command == 'play':
self.onPlay(data)
elif data.command == 'log-sync':
self.onLogSync()
def mainLoop(self):
poll = LCMPoller(self.lc)
while not self.shouldStop:
poll.handleLCM()
class LogSyncThread(object):
def __init__(self, sharedUtimeMap):
self.sharedUtimeMap = sharedUtimeMap
self.utimes = None
self.logLookup = LogLookup()
self.lastPublishTime = time.time()
self.publishFrequency = 1/60.0
self.lcListen = lcm.LCM()
self.lc = lcm.LCM(VIDEO_LCM_URL)
self.sub = self.lcListen.subscribe('EST_ROBOT_STATE', self.onControlMessage)
def start(self):
self.thread = threading.Thread(target=self.mainLoop)
self.thread.daemon = True
self.shouldStop = False
self.thread.start()
def stop(self):
self.shouldStop = True
self.logLookup.closeLogs()
self.thread.join()
self.lcListen.unsubscribe(self.sub)
def updateLastPublishTime(self):
self.lastPublishTime = time.time()
def getLastPublishElapsedTime(self):
return time.time() - self.lastPublishTime
def onFrameRequest(self, utimeRequest):
if self.logLookup.utimeMap is None:
self.logLookup.setUtimeMap(dict(self.sharedUtimeMap))
assert len(self.logLookup.utimeMap)
self.utimes = np.array(self.logLookup.utimeMap.keys())
self.utimes.sort()
requestIndex = self.utimes.searchsorted(utimeRequest)
if requestIndex >= len(self.utimes):
requestIndex = len(self.utimes)-1
utimeFrame = self.utimes[requestIndex]
image, filename = self.logLookup.getImage(utimeFrame)
print 'utime request: %d utime frame: %d delta: %f file: %s' % (utimeRequest, utimeFrame, (utimeFrame-utimeRequest)*1e-6, os.path.basename(filename))
self.lc.publish('VIDEO_PLAYBACK_IMAGE', image.encode())
self.updateLastPublishTime()
def onControlMessage(self, channel, msgBytes):
if self.getLastPublishElapsedTime() > self.publishFrequency:
msg = spy.decodeMessage(msgBytes)
self.onFrameRequest(msg.utime)
def mainLoop(self):
poll = LCMPoller(self.lcListen)
while not self.shouldStop:
poll.handleLCM()
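# CatalogThread watches logDir for lcmlog-* files, optionally pruning the
# oldest files beyond maxNumberOfFiles, and maintains utimeMap, a mapping of
# event timestamp -> (filename, file position) for events on videoChannel.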
class CatalogThread(object):
def __init__(self, logDir, videoChannel):
self.videoChannel = videoChannel
self.logDir = logDir
self.pruneEnabled = True
self.maxNumberOfFiles = 30
self.cropTimeWindow = 60*30
self.utimeMap = {}
self.catalog = {}
def start(self):
self.thread = threading.Thread(target=self.mainLoop)
self.thread.daemon = True
self.shouldStop = False
self.thread.start()
def stop(self):
self.shouldStop = True
self.thread.join()
def mainLoop(self):
while not self.shouldStop:
self.updateCatalog()
time.sleep(0.3)
def updateCatalog(self):
logFiles = self.getExistingLogFiles(self.logDir)
if self.pruneEnabled:
logFiles = self.pruneLogFiles(logFiles, self.maxNumberOfFiles)
for logFile in logFiles:
self.updateLogInfo(logFile)
def updateLogInfo(self, filename):
fieldData = self.catalog.get(filename)
if not fieldData:
print 'discovered new file:', filename
fieldData = FieldData(filename=filename, fileSize=0, lastFilePos=0, channelTypes={})
self.catalog[filename] = fieldData
self.cropUtimeMap(self.utimeMap, self.cropTimeWindow)
log = lcm.EventLog(filename, 'r')
fileSize = log.size()
# if the log file is the same size as the last time it was inspected
# then there is no more work to do, return.
if fileSize == fieldData.fileSize:
return
fieldData.fileSize = fileSize
# seek to the last processed event, if one exists, then read past it
if fieldData.lastFilePos > 0:
log.seek(fieldData.lastFilePos)
event = log.read_next_event()
while True:
filepos = log.tell()
event = log.read_next_event()
if not event:
break
fieldData.lastFilePos = filepos
timestamp = event.timestamp
channel = event.channel
if channel == self.videoChannel:
self.utimeMap[timestamp] = (filename, filepos)
# maintain a catalog of channelName->messageType
#if channel not in fieldData.channelTypes:
# messageClass = spy.getMessageClass(event.data)
# fieldData.channelTypes[channel] = messageClass
log.close()
@staticmethod
def getExistingLogFiles(dirName):
# sort log filenames by splitting string into
# list of strings and numbers.
# Example filename: lcmlog-2014-04-16.25
# sort implementation taken from: http://stackoverflow.com/a/5967539
def atoi(text):
return int(text) if text.isdigit() else text
def splitKeys(text):
return [atoi(c) for c in re.split('(\d+)', text)]
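# e.g. splitKeys('lcmlog-2014-04-16.9') -> ['lcmlog-', 2014, '-', 4, '-', 16, '.', 9, '']
# so '...16.9' sorts before '...16.10', unlike plain lexicographic ordering.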
filenames = glob.glob(dirName + '/lcmlog-*')
return sorted(filenames, key=splitKeys)
@staticmethod
def pruneLogFiles(logFiles, maxNumberOfFiles):
logFiles = list(logFiles)
while len(logFiles) > maxNumberOfFiles:
filename = logFiles.pop(0)
print 'deleting:', filename
os.remove(filename)
return logFiles
@staticmethod
def cropUtimeMap(utimeMap, timeWindow):
if not len(utimeMap):
return
utimes = getRecentUtimes(utimeMap, timeWindow)
cropTime = utimes[0]
for utime in utimeMap.keys():
if utime < cropTime:
del utimeMap[utime]
def main():
try:
logFileDir = sys.argv[1]
except IndexError:
print 'Usage: %s <log file directory>' % sys.argv[0]
sys.exit(1)
spy.findLCMModulesInSysPath()
catalogThread = CatalogThread(logFileDir, 'DECKLINK_VIDEO_CAPTURE')
catalogThread.start()
serverThread = ServerThread(catalogThread.utimeMap)
serverThread.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
|
import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads,
temp_dir, requires_type_collecting, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
# Usually, sys.gettotalrefcount() is only present if Python has been
# compiled in debug mode. If it's missing, expect that Python has
# been released in release mode: with NDEBUG defined.
BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection; make that happen here
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection; make that happen here
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x survives in gen 2 (the oldest generation; there is no gen 3)
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
@requires_type_collecting
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
gc.collect()
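# A fresh self-referential list starts out in generation 0; collecting the
# generation it currently lives in promotes the survivor one generation,
# until it reaches the oldest generation (2), where it stays.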
l = []
l.append(l)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=0)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=1)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=2)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
del l
gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_38379(self):
# When a finalizer resurrects objects, stats were reporting them as
# having been collected. This affected both collect()'s return
# value and the dicts returned by get_stats().
N = 100
class A: # simple self-loop
def __init__(self):
self.me = self
class Z(A): # resurrecting __del__
def __del__(self):
zs.append(self)
zs = []
def getstats():
d = gc.get_stats()[-1]
return d['collected'], d['uncollectable']
gc.collect()
gc.disable()
# No problems if just collecting A() instances.
oldc, oldnc = getstats()
for i in range(N):
A()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N) # instance object & its dict
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# But Z() is not actually collected.
oldc, oldnc = c, nc
Z()
# Nothing is collected - Z() is merely resurrected.
t = gc.collect()
c, nc = getstats()
#self.assertEqual(t, 2) # before
self.assertEqual(t, 0) # after
#self.assertEqual(c - oldc, 2) # before
self.assertEqual(c - oldc, 0) # after
self.assertEqual(nc - oldnc, 0)
# Unfortunately, a Z() prevents _anything_ from being collected.
# It should be possible to collect the A instances anyway, but
# that will require non-trivial code changes.
oldc, oldnc = c, nc
for i in range(N):
A()
Z()
# Z() prevents anything from being collected.
t = gc.collect()
c, nc = getstats()
#self.assertEqual(t, 2*N + 2) # before
self.assertEqual(t, 0) # after
#self.assertEqual(c - oldc, 2*N + 2) # before
self.assertEqual(c - oldc, 0) # after
self.assertEqual(nc - oldnc, 0)
# But the A() trash is reclaimed on the next run.
oldc, oldnc = c, nc
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N)
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
gc.enable()
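# Callbacks appended to gc.callbacks are invoked with (phase, info), where
# phase is "start" or "stop" and info carries "generation", "collected" and
# "uncollectable"; the tests below register two callbacks and inspect what
# they record around explicit collections.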
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
@unittest.skipIf(BUILD_WITH_NDEBUG,
'built with -NDEBUG')
def test_refcount_errors(self):
self.preclean()
# Verify the "handling" of objects with broken refcounts
# Skip the test if ctypes is not available
import_module("ctypes")
import subprocess
code = textwrap.dedent('''
from test.support import gc_collect, SuppressCrashReport
a = [1, 2, 3]
b = [a]
# Avoid coredump when Py_FatalError() calls abort()
SuppressCrashReport().__enter__()
# Simulate the refcount of "a" being too low (compared to the
# references held on it by live data), but keeping it above zero
# (to avoid deallocating it):
import ctypes
ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
# The garbage collector should now have a fatal error
# when it reaches the broken object
gc_collect()
''')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
# Verify that stderr has a useful error message:
self.assertRegex(stderr,
br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
self.assertRegex(stderr,
br'refcount is too small')
self.assertRegex(stderr,
br'object : \[1, 2, 3\]')
self.assertRegex(stderr,
br'type : list')
self.assertRegex(stderr,
br'refcount: 1')
# "address : 0x7fb5062efc18"
# "address : 7FB5062EFC18"
self.assertRegex(stderr,
br'address : [0-9a-fA-Fx]+')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
|
# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from datetime import datetime
from trove.common import cfg
from trove.common import exception
from trove.common import utils
from trove.common.exception import ModelNotFoundError
from trove.datastore import models as dstore_models
from trove.db import get_db_api
from trove.db import models as dbmodels
from trove.openstack.common import log as logging
from trove.common.i18n import _
from trove.taskmanager import api as task_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Configurations(object):
DEFAULT_LIMIT = CONF.configurations_page_size
@staticmethod
def load(context):
if context is None:
raise TypeError("Argument context not defined.")
if context.is_admin:
db_info = DBConfiguration.find_all(deleted=False)
if db_info.count() == 0:
LOG.debug("No configurations found for admin user")
else:
db_info = DBConfiguration.find_all(tenant_id=context.tenant,
deleted=False)
if db_info.count() == 0:
LOG.debug("No configurations found for tenant %s"
% context.tenant)
limit = int(context.limit or Configurations.DEFAULT_LIMIT)
if limit > Configurations.DEFAULT_LIMIT:
limit = Configurations.DEFAULT_LIMIT
data_view = DBConfiguration.find_by_pagination('configurations',
db_info,
"foo",
limit=limit,
marker=context.marker)
next_marker = data_view.next_page_marker
return data_view.collection, next_marker
class Configuration(object):
def __init__(self, context, configuration_id):
self.context = context
self.configuration_id = configuration_id
    @property
    def instances(self):
        # Note: instance associations are not loaded by this module; as
        # written this property would recurse if accessed.
        return self.instances
    @property
    def items(self):
        # Load the configuration items for this group on demand.
        return Configuration.load_items(self.context, self.configuration_id)
@staticmethod
def create(name, description, tenant_id, datastore, datastore_version):
configurationGroup = DBConfiguration.create(
name=name,
description=description,
tenant_id=tenant_id,
datastore_version_id=datastore_version)
return configurationGroup
@staticmethod
def create_items(cfg_id, values):
LOG.debug("Saving configuration values for %s - "
"values: %s" % (cfg_id, values))
config_items = []
for key, val in values.iteritems():
config_item = DBConfigurationParameter.create(
configuration_id=cfg_id,
configuration_key=key,
configuration_value=val)
config_items.append(config_item)
return config_items
@staticmethod
def delete(context, group):
deleted_at = datetime.utcnow()
Configuration.remove_all_items(context, group.id, deleted_at)
group.deleted = True
group.deleted_at = deleted_at
group.save()
@staticmethod
def remove_all_items(context, id, deleted_at):
items = DBConfigurationParameter.find_all(configuration_id=id,
deleted=False).all()
LOG.debug("Removing all configuration values for %s" % id)
for item in items:
item.deleted = True
item.deleted_at = deleted_at
item.save()
@staticmethod
def load_configuration_datastore_version(context, id):
config = Configuration.load(context, id)
datastore_version = dstore_models.DatastoreVersion.load_by_uuid(
config.datastore_version_id)
return datastore_version
@staticmethod
def load(context, id):
try:
if context.is_admin:
return DBConfiguration.find_by(id=id, deleted=False)
else:
return DBConfiguration.find_by(id=id,
tenant_id=context.tenant,
deleted=False)
except ModelNotFoundError:
msg = _("Configuration group with ID %s could not be found.") % id
raise ModelNotFoundError(msg)
@staticmethod
def find_parameter_details(name, detail_list):
for item in detail_list:
if item.name == name:
return item
return None
@staticmethod
def load_items(context, id):
datastore_v = Configuration.load_configuration_datastore_version(
context,
id)
config_items = DBConfigurationParameter.find_all(
configuration_id=id, deleted=False).all()
detail_list = DatastoreConfigurationParameters.load_parameters(
datastore_v.id)
for item in config_items:
rule = Configuration.find_parameter_details(
str(item.configuration_key), detail_list)
if not rule:
continue
if rule.data_type == 'boolean':
item.configuration_value = bool(int(item.configuration_value))
elif rule.data_type == 'integer':
item.configuration_value = int(item.configuration_value)
else:
item.configuration_value = str(item.configuration_value)
return config_items
def get_configuration_overrides(self):
"""Gets the overrides dictionary to apply to an instance."""
overrides = {}
if self.configuration_id:
config_items = Configuration.load_items(self.context,
id=self.configuration_id)
for i in config_items:
overrides[i.configuration_key] = i.configuration_value
return overrides
def does_configuration_need_restart(self):
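        """Return True if any parameter set in this configuration group is
        flagged as restart_required for its datastore version.
        """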
datastore_v = Configuration.load_configuration_datastore_version(
self.context,
self.configuration_id)
config_items = Configuration.load_items(self.context,
id=self.configuration_id)
LOG.debug("config_items: %s" % config_items)
detail_list = DatastoreConfigurationParameters.load_parameters(
datastore_v.id, show_deleted=True)
for i in config_items:
LOG.debug("config item: %s" % i)
details = Configuration.find_parameter_details(
i.configuration_key, detail_list)
LOG.debug("parameter details: %s" % details)
if not details:
raise exception.NotFound(uuid=i.configuration_key)
if bool(details.restart_required):
return True
return False
@staticmethod
def save(context, configuration, configuration_items, instances):
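        """Persist the configuration group and its items, then push the
        merged key/value overrides to each given instance via the task
        manager API.
        """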
DBConfiguration.save(configuration)
for item in configuration_items:
item["deleted_at"] = None
DBConfigurationParameter.save(item)
items = Configuration.load_items(context, configuration.id)
for instance in instances:
LOG.debug("Configuration %s being applied to "
"instance: %s" % (configuration.id, instance.id))
overrides = {}
for i in items:
overrides[i.configuration_key] = i.configuration_value
task_api.API(context).update_overrides(instance.id, overrides)
class DBConfiguration(dbmodels.DatabaseModelBase):
_data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id',
'deleted', 'deleted_at', 'created', 'updated']
@property
def datastore(self):
datastore_version = dstore_models.DatastoreVersion.load_by_uuid(
self.datastore_version_id)
datastore = dstore_models.Datastore.load(
datastore_version.datastore_id)
return datastore
@property
def datastore_version(self):
datastore_version = dstore_models.DatastoreVersion.load_by_uuid(
self.datastore_version_id)
return datastore_version
class DBConfigurationParameter(dbmodels.DatabaseModelBase):
_data_fields = ['configuration_id', 'configuration_key',
'configuration_value', 'deleted',
'deleted_at']
def __hash__(self):
return self.configuration_key.__hash__()
class DBDatastoreConfigurationParameters(dbmodels.DatabaseModelBase):
"""Model for storing the configuration parameters on a datastore."""
_auto_generated_attrs = ['id']
_data_fields = [
'name',
'datastore_version_id',
'restart_required',
'max_size',
'min_size',
'data_type',
'deleted',
'deleted_at',
]
_table_name = "datastore_configuration_parameters"
preserve_on_delete = True
class DatastoreConfigurationParameters(object):
def __init__(self, db_info):
self.db_info = db_info
@staticmethod
def create(**kwargs):
"""Create a configuration parameter for a datastore version."""
# Do we already have a parameter in the db?
# yes: and its deleted then modify the param
# yes: and its not deleted then error on create.
# no: then just create the new param
ds_v_id = kwargs.get('datastore_version_id')
config_param_name = kwargs.get('name')
try:
param = DatastoreConfigurationParameters.load_parameter_by_name(
ds_v_id,
config_param_name,
show_deleted=True)
if param.deleted == 1:
param.restart_required = kwargs.get('restart_required')
param.data_type = kwargs.get('data_type')
param.max_size = kwargs.get('max_size')
param.min_size = kwargs.get('min_size')
param.deleted = 0
param.save()
return param
else:
raise exception.ConfigurationParameterAlreadyExists(
parameter_name=config_param_name,
datastore_version=ds_v_id)
except exception.NotFound:
pass
config_param = DBDatastoreConfigurationParameters.create(
**kwargs)
return config_param
@staticmethod
def delete(version_id, config_param_name):
config_param = DatastoreConfigurationParameters.load_parameter_by_name(
version_id, config_param_name)
config_param.deleted = True
config_param.deleted_at = datetime.utcnow()
config_param.save()
@classmethod
def load_parameters(cls, datastore_version_id, show_deleted=False):
try:
if show_deleted:
return DBDatastoreConfigurationParameters.find_all(
datastore_version_id=datastore_version_id
)
else:
return DBDatastoreConfigurationParameters.find_all(
datastore_version_id=datastore_version_id,
deleted=False
)
except exception.NotFound:
raise exception.NotFound(uuid=datastore_version_id)
@classmethod
def load_parameter(cls, config_id, show_deleted=False):
try:
if show_deleted:
return DBDatastoreConfigurationParameters.find_by(
id=config_id
)
else:
return DBDatastoreConfigurationParameters.find_by(
id=config_id, deleted=False
)
except exception.NotFound:
raise exception.NotFound(uuid=config_id)
@classmethod
def load_parameter_by_name(cls, datastore_version_id, config_param_name,
show_deleted=False):
try:
if show_deleted:
return DBDatastoreConfigurationParameters.find_by(
datastore_version_id=datastore_version_id,
name=config_param_name
)
else:
return DBDatastoreConfigurationParameters.find_by(
datastore_version_id=datastore_version_id,
name=config_param_name,
deleted=False
)
except exception.NotFound:
raise exception.NotFound(uuid=config_param_name)
def create_or_update_datastore_configuration_parameter(name,
datastore_version_id,
restart_required,
data_type,
max_size,
min_size):
get_db_api().configure_db(CONF)
datastore_version = dstore_models.DatastoreVersion.load_by_uuid(
datastore_version_id)
try:
config = DatastoreConfigurationParameters.load_parameter_by_name(
datastore_version_id, name, show_deleted=True)
config.restart_required = restart_required
config.max_size = max_size
config.min_size = min_size
config.data_type = data_type
get_db_api().save(config)
except exception.NotFound:
config = DBDatastoreConfigurationParameters(
id=utils.generate_uuid(),
name=name,
datastore_version_id=datastore_version.id,
restart_required=restart_required,
data_type=data_type,
max_size=max_size,
min_size=min_size,
deleted=False,
)
get_db_api().save(config)
def load_datastore_configuration_parameters(datastore,
datastore_version,
config_file):
get_db_api().configure_db(CONF)
(ds, ds_v) = dstore_models.get_datastore_version(
type=datastore, version=datastore_version, return_inactive=True)
with open(config_file) as f:
config = json.load(f)
for param in config['configuration-parameters']:
create_or_update_datastore_configuration_parameter(
param['name'],
ds_v.id,
param['restart_required'],
param['type'],
param.get('max'),
param.get('min'),
)
def persisted_models():
return {
'configurations': DBConfiguration,
'configuration_parameters': DBConfigurationParameter,
'datastore_configuration_parameters': DBDatastoreConfigurationParameters, # noqa
}
|
|
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Concat(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.s = pd.Series(list('aabbcd') * N).astype('category')
self.a = pd.Categorical(list('aabbcd') * N)
self.b = pd.Categorical(list('bbcdjk') * N)
def time_concat(self):
pd.concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
class Constructor(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.categories = list('abcde')
self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
        self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',
                                                 periods=N // 10,
                                                 freq='s'))
self.datetimes_with_nat = self.datetimes.copy()
self.datetimes_with_nat.iloc[-1] = pd.NaT
self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
self.values_all_nan = [np.nan] * len(self.values)
self.values_all_int8 = np.ones(N, 'int8')
def time_regular(self):
pd.Categorical(self.values, self.categories)
def time_fastpath(self):
pd.Categorical(self.codes, self.cat_idx, fastpath=True)
def time_datetimes(self):
pd.Categorical(self.datetimes)
def time_datetimes_with_nat(self):
pd.Categorical(self.datetimes_with_nat)
def time_with_nan(self):
pd.Categorical(self.values_some_nan)
def time_all_nan(self):
pd.Categorical(self.values_all_nan)
def time_from_codes_all_int8(self):
pd.Categorical.from_codes(self.values_all_int8, self.categories)
class ValueCounts(object):
goal_time = 0.2
params = [True, False]
param_names = ['dropna']
def setup(self, dropna):
n = 5 * 10**5
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
class Repr(object):
goal_time = 0.2
def setup(self):
self.sel = pd.Series(['s1234']).astype('category')
def time_rendering(self):
str(self.sel)
class SetCategories(object):
goal_time = 0.2
def setup(self):
n = 5 * 10**5
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class Rank(object):
goal_time = 0.2
def setup(self):
N = 10**5
ncats = 100
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = self.s_str.astype('category')
with warnings.catch_warnings(record=True):
self.s_str_cat_ordered = self.s_str.astype('category',
ordered=True)
self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
self.s_int_cat = self.s_int.astype('category')
with warnings.catch_warnings(record=True):
self.s_int_cat_ordered = self.s_int.astype('category',
ordered=True)
def time_rank_string(self):
self.s_str.rank()
def time_rank_string_cat(self):
self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
self.s_str_cat_ordered.rank()
def time_rank_int(self):
self.s_int.rank()
def time_rank_int_cat(self):
self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
class Isin(object):
goal_time = 0.2
params = ['object', 'int64']
param_names = ['dtype']
def setup(self, dtype):
np.random.seed(1234)
n = 5 * 10**5
sample_size = 100
arr = [i for i in np.random.randint(0, n // 10, size=n)]
if dtype == 'object':
arr = ['s%04d' % i for i in arr]
self.sample = np.random.choice(arr, sample_size)
self.series = pd.Series(arr).astype('category')
def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
class IsMonotonic(object):
def setup(self):
N = 1000
self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
self.s = pd.Series(self.c)
def time_categorical_index_is_monotonic_increasing(self):
self.c.is_monotonic_increasing
def time_categorical_index_is_monotonic_decreasing(self):
self.c.is_monotonic_decreasing
def time_categorical_series_is_monotonic_increasing(self):
self.s.is_monotonic_increasing
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
class Contains(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.ci = tm.makeCategoricalIndex(N)
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
self.key in self.ci
def time_categorical_contains(self):
self.key in self.c
class CategoricalSlicing(object):
goal_time = 0.2
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**6
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': pd.Categorical(values),
            'monotonic_decr': pd.Categorical(list(reversed(values))),
'non_monotonic': pd.Categorical(list('abc' * N))}
self.data = indices[index]
self.scalar = 10000
self.list = list(range(10000))
self.cat_scalar = 'b'
def time_getitem_scalar(self, index):
self.data[self.scalar]
def time_getitem_slice(self, index):
self.data[:self.scalar]
def time_getitem_list_like(self, index):
self.data[[self.scalar]]
def time_getitem_list(self, index):
self.data[self.list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
from .pandas_vb_common import setup # noqa: F401
|
|
"""
pies/overrides.py
Overrides Python 2 built-ins so that a '*' import makes them behave as closely as possible to their Python 3 counterparts
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import functools
import sys
from numbers import Integral
from ._utils import unmodified_isinstance, with_metaclass
from .version_info import PY2, PY3, VERSION
native_dict = dict
native_round = round
native_filter = filter
native_map = map
native_zip = zip
native_range = range
native_str = str
native_chr = chr
native_input = input
native_next = next
native_object = object
common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr',
'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types',
'native_next', 'native_object', 'with_metaclass']
if PY3:
import urllib
import builtins
from urllib import parse
from collections import OrderedDict
integer_types = (int, )
def u(string):
return string
def itemsview(collection):
return collection.items()
def valuesview(collection):
return collection.values()
def keysview(collection):
return collection.keys()
urllib.quote = parse.quote
urllib.quote_plus = parse.quote_plus
urllib.unquote = parse.unquote
urllib.unquote_plus = parse.unquote_plus
urllib.urlencode = parse.urlencode
execute = getattr(builtins, 'exec')
if VERSION[1] < 2:
def callable(entity):
return hasattr(entity, '__call__')
common.append('callable')
__all__ = common + ['OrderedDict', 'urllib']
else:
from itertools import ifilter as filter
from itertools import imap as map
from itertools import izip as zip
from decimal import Decimal, ROUND_HALF_EVEN
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import codecs
str = unicode
chr = unichr
input = raw_input
range = xrange
integer_types = (int, long)
def _create_not_allowed(name):
def _not_allow(*args, **kwargs):
raise NameError("name '{0}' is not defined".format(name))
_not_allow.__name__ = name
return _not_allow
for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'):
globals()[removed] = _create_not_allowed(removed)
def u(string):
        return codecs.unicode_escape_decode(string)[0]
def execute(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
class _dict_view_base(object):
__slots__ = ('_dictionary', )
def __init__(self, dictionary):
self._dictionary = dictionary
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__())))
def __unicode__(self):
return str(self.__repr__())
def __str__(self):
return str(self.__unicode__())
class dict_keys(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iterkeys()
class dict_values(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.itervalues()
class dict_items(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iteritems()
def itemsview(collection):
return dict_items(collection)
def valuesview(collection):
return dict_values(collection)
def keysview(collection):
return dict_keys(collection)
class dict(unmodified_isinstance(native_dict)):
def has_key(self, *args, **kwargs):
            raise AttributeError("'dict' object has no attribute 'has_key'")
def items(self):
return dict_items(self)
def keys(self):
return dict_keys(self)
def values(self):
return dict_values(self)
def round(number, ndigits=None):
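        """Python 2 stand-in for Python 3's round().
        Uses decimal half-even ("banker's") rounding and returns an int when
        ndigits is omitted, mirroring the Python 3 behaviour; negative ndigits
        are not supported.
        """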
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
exponent = Decimal('10') ** (-ndigits)
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d)
    def next(iterator):
        try:
            return iterator.__next__()
        except AttributeError:
            return native_next(iterator)
class FixStr(type):
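        """Metaclass that lets classes define a single Python 3 style __str__.
        The original __str__ is kept as __unicode__ and __str__ is replaced by
        a wrapper that encodes the result to UTF-8, as expected on Python 2.
        """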
def __new__(cls, name, bases, dct):
if '__str__' in dct:
dct['__unicode__'] = dct['__str__']
dct['__str__'] = lambda self: self.__unicode__().encode('utf-8')
return type.__new__(cls, name, bases, dct)
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
return type.__instancecheck__(cls, instance)
class object(with_metaclass(FixStr, object)):
pass
__all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr',
'input', 'range', 'filter', 'map', 'zip', 'object']
|
|
import re
import pytest
from unittest.mock import MagicMock, patch
from rota.mass import Mass, MassError
from rota.reader import Reader
# from rota.exclude import ExcludeError
@pytest.fixture()
def mass():
return Mass(
'saturday',
{
'needed': 2,
'startfrom': 1,
'exclude': [],
'readers': {}
}
)
class TestMass_init(object):
init_error_data = [
(
'',
{
'needed': 2,
'startfrom': 1,
'exclude': [],
'readers': {}
},
'Bad label'
),
(
'saturday',
{
'needed': 2,
'exclude': [],
'readers': {}
},
"Required key is missing ('startfrom'); Label: saturday"
),
(
'saturday',
{
'startfrom': 1,
'exclude': [],
'readers': {}
},
"Required key is missing ('needed'); Label: saturday"
),
(
'saturday',
{
'needed': 'B',
'startfrom': 1,
'exclude': [],
'readers': {}
},
"'needed' is invalid (B); Label: saturday"
),
(
'saturday',
{
'needed': 2,
'startfrom': 'A',
'exclude': [],
'readers': {}
},
"'startfrom' is invalid (A); Label: saturday"
),
]
init_error_ids = [
'no_label',
'no_startfrom',
'no_needed',
'bad_needed',
'bad_startfrom'
]
init_data = [
(
'saturday',
{
'needed': 2,
'startfrom': 1,
'exclude': [],
'readers': {}
}
),
(
'saturday',
{
'needed': 2,
'startfrom': 1,
'readers': {}
}
)
]
init_ids = [
'basic',
'no_exclude'
]
@pytest.mark.parametrize("label, data, expected", init_error_data,
ids=init_error_ids)
def test_init_errors(self, label, data, expected):
        with pytest.raises(MassError, match=re.escape(expected)):
m = Mass(label, data) # noqa: F841
@pytest.mark.parametrize("label, data", init_data, ids=init_ids)
def test_init(self, label, data):
m = Mass(label, data) # noqa: F841
class TestMass_repr:
repr_data = [
# Test data for the __repr__ method
# Each set contains the label, data, and expected output
(
'saturday',
{
'needed': 2,
'startfrom': 1,
'exclude': [],
'readers': {}
},
"Mass('saturday', 2, 1, [], {})"
),
(
'saturday',
{
'needed': 2,
'startfrom': 1,
'exclude': ['01/01/2018'],
'readers':{
1: {
"name": "Gabrielle Bedford",
"exclude": []
}
}
},
# Long string split over two lines
"Mass('saturday', 2, 1, ['01/01/2018'], "
"{1: Reader(1, 'Gabrielle Bedford', [], [])})"
)
]
repr_ids = [
'basic',
'all'
]
@pytest.mark.parametrize("label, data, expected", repr_data, ids=repr_ids)
def test_repr(self, label, data, expected):
m = Mass(label, data)
assert repr(m) == expected
class TestMass:
def test_str(self, mass):
assert str(mass) == 'saturday'
def test_get_reader(self, mass):
with pytest.raises(
MassError,
match='No reader for ID: 1'
):
mass.get_reader(1)
def test_get_reader_one_reader(self, mass):
mass.readers[1] = 'Reader'
assert mass.get_reader(1) == 'Reader'
def test_add_reader(self, mass):
tr = MagicMock()
tr.id = 1
tr.name = 'A Reader'
mass.add_reader(tr)
assert mass.readers[1] == tr
def test_get_readers(self, mass):
r1 = MagicMock()
r1.id = 1
r1.name = 'Reader 1'
r2 = MagicMock()
r2.id = 2
r2.name = 'Reader 2'
mass.add_reader(r1)
mass.add_reader(r2)
assert mass.get_reader() == r1
assert mass.get_reader(2) == r2
assert mass.get_reader(1) == r1
class TestMass_allocate:
def test_allocate_2_slots_2_readers(self, mass):
a = MagicMock()
a.id = 1
a.get_name.return_value = ('Reader 1', 1)
b = MagicMock()
b.id = 2
b.get_name.return_value = ('Reader 2', 0)
mass.add_reader(a)
mass.add_reader(b)
expected = ['Reader 1', 'Reader 2']
assert mass.allocate(1) == expected
def test_allocate_3_slots_3_readers(self):
mass = Mass(
'saturday',
{
'needed': 3,
'startfrom': 1,
'exclude': [],
'readers': {}
}
)
a = MagicMock()
a.id = 1
a.get_name.return_value = ("One", 2)
b = MagicMock()
b.id = 2
b.get_name.return_value = ("Two A, Two B", 0)
mass.add_reader(a)
mass.add_reader(b)
expected = ['One', 'Two A, Two B']
assert mass.allocate(1) == expected
def test_allocate_2_slots_3_readers(self, mass):
a = MagicMock()
a.id = 1
a.get_name.return_value = ("One", 2)
b = MagicMock()
b.id = 2
b.get_name.return_value = ("Two", 0)
mass.add_reader(a)
mass.add_reader(b)
expected = ['One', 'Two']
assert mass.allocate(1) == expected
def test_allocate_3_slots_2_readers(self):
mass = Mass(
'saturday',
{
'needed': 3,
'startfrom': 1,
'exclude': [],
'readers': {}
}
)
a = MagicMock()
a.id = 1
a.get_name.return_value = ("One", 2)
b = MagicMock()
b.id = 2
b.get_name.return_value = ("Two", 1)
mass.add_reader(a)
mass.add_reader(b)
expected = 'Not enough readers for a single mass'
# with pytest.raises(Exception, message=expected):
mass.allocate(1)
|
|
#!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
if mode in gyp.debug.keys():
print "%s: %s" % (mode.upper(), message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file[-len(extension):] == extension:
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params={}, check=False):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
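  The returned list has the form [generator, flat_list, targets, data],
  as unpacked by main() below.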
"""
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
default_variables.update(generator.generator_default_variables)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check)
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
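  For example (illustrative input only):
    NameValueListToDict(['OS=linux', 'CHROMIUM=1', 'WERROR'])
  returns {'OS': 'linux', 'CHROMIUM': 1, 'WERROR': True}.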
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
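  For example (illustrative), with flag '-D', values ['B=2'], the Noop
  predicate and GYP_DEFINES='A=1' in the environment (and use_environment
  enabled), the result is ['-DA=1', '-DB=2'].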
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
flags.append(FormatOpt(flag, predicate(flag_value)))
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
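  For example (illustrative), options parsed from '-D OS=linux --check'
  regenerate as something like ['--ignore-environment', '-DOS=linux',
  '--check'] (ordering follows the regeneration metadata).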
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
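    For example, main() below registers a path-typed option that can also be
    fed from the environment:
      parser.add_option('--generator-output', dest='generator_output',
                        action='store', default=None, metavar='DIR',
                        type='path', env_name='GYP_GENERATOR_OUTPUT',
                        help='puts generated build files under DIR')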
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables" '
'and "general"')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
break
home_dot_gyp = None
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
# TODO(thomasvl): add support for ~/.gyp/defaults
(options, build_files_arg) = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
options.formats = [ {'darwin': 'xcode',
'win32': 'msvs',
'cygwin': 'msvs',
'freebsd7': 'make',
'freebsd8': 'make',
'linux2': 'scons',}[sys.platform] ]
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for (option, value) in options.__dict__.items():
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
print >>sys.stderr, (usage + '\n\n%s: error: no build_file') % \
(my_name, my_name)
return 1
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise Exception, \
'Could not automatically locate src directory. This is a ' + \
'temporary Chromium feature that will be removed. Use ' + \
'--depth as a workaround.'
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining the YAMLConnector class."""
import os
driver = True
try:
import yaml
except ImportError:
driver = False
from ext.aboard.dc.connector import DataConnector
from ext.aboard.dc import exceptions
from ext.aboard.model import exceptions as mod_exceptions
from ext.aboard.model.functions import *
class YAMLConnector(DataConnector):
"""Data connector for YAML.
    This data connector should read and write data in YAML format, using
    the yaml library.
A very short example:
# Table: users
- id: 1
username: admin
email_address: admin@python-aboard.org
"""
name = "yaml"
def __init__(self):
"""Check the driver presence.
If not found, raise a DriverNotFound exception.
"""
if not driver:
raise exceptions.DriverNotFound(
"the yaml library can not be found")
self.location = None
self.auto_increments = {}
self.to_update = set()
def setup(self, location=None):
"""Setup the data connector."""
if location is None:
raise exceptions.InsufficientConfiguration(
"the location for storing datas was not specified for " \
"the YAML data connector")
location = location.replace("\\", "/")
if location.startswith("~"):
location = os.path.expanduser("~") + location[1:]
if location.endswith("/"):
location = location[:-1]
if not os.path.exists(location):
# Try to create it
os.makedirs(location)
if not os.access(location, os.R_OK):
raise exceptions.DriverInitializationError(
"cannot read in {}".format(location))
if not os.access(location, os.W_OK):
raise exceptions.DriverInitializationError(
"cannot write in {}".format(location))
DataConnector.__init__(self)
self.location = location
self.files = {}
def close(self):
"""Close the data connector (nothing to be done)."""
pass
def destroy(self):
"""Erase EVERY stored data."""
for file in os.listdir(self.location):
os.remove(self.location + "/" + file)
self.clear_cache()
def record_model(self, model):
"""Record the given model."""
name = DataConnector.record_model(self, model)
filename = self.location + "/" + name + ".yml"
if os.path.exists(filename):
with open(filename, "r") as file:
self.read_table(name, file)
self.files[name] = filename
def read_table(self, table_name, file):
"""Read a whoe table contained in a file.
This file is supposed to be formatted as a YAML file. Furthermore,
the 'yaml.load' function should return a list of dictionaries.
        The first dictionary describes some table information, such as
        the status of the auto-increment fields. Each following dictionary
        is a line of data which should describe a model object.
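        A minimal illustrative layout (field names are examples only):
            - auto_increments:
                  id: 2
            - id: 1
              username: admin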
"""
name = table_name
content = file.read()
datas = yaml.load(content)
if not isinstance(datas, list):
raise exceptions.DataFormattingError(
"the file {} must contain a YAML formatted list".format(
self.files[name]))
class_table = self.models[name]
class_datas = datas[0]
if not isinstance(class_datas, dict):
raise exceptions.DataFormattingError(
"the table informations are not stored in a YAML " \
"dictionary in the file {}".format(self.files[name]))
self.read_table_header(name, class_datas)
objects = {}
for line in datas[1:]:
object = class_table.build(**line)
pkey = get_pkey_values(object)
if len(pkey) == 1:
pkey = pkey[0]
objects[pkey] = object
self.objects_tree[name] = objects
def read_table_header(self, name, datas):
"""Read the table header.
        This header should describe some information concerning the
        table (such as the auto-increment fields).
"""
auto_increments = datas.get("auto_increments", [])
self.auto_increments[name] = auto_increments
def loop(self):
"""Write the YAML tables."""
for table in self.to_update:
self.write_table(table)
self.to_update.clear()
def write_table(self, name):
"""Write the table in a file."""
# First, we get the header
header = {}
if name in self.auto_increments:
header["auto_increments"] = self.auto_increments[name]
# Next we browse the object
objects = []
for object in self.objects_tree[name].values():
objects.append(object.__dict__)
objects.insert(0, header)
content = yaml.dump(objects, default_flow_style=False)
with open(self.location + "/" + name + ".yml", "w") as file:
file.write(content)
def get_all_objects(self, model):
"""Return all the model's object in a list."""
name = get_name(model)
return list(self.objects_tree.get(name, {}).values())
def find_object(self, model, pkey_values):
"""Return, if found, the selected object.
Raise a model.exceptions.ObjectNotFound if not found.
"""
# Look for the object in the cached tree
object = self.get_from_cache(model, pkey_values)
if object:
return object
raise mod_exceptions.ObjectNotFound(model, pkey_values)
def add_object(self, object):
"""Save the object, issued from a model."""
name = get_name(type(object))
fields = get_fields(type(object))
auto_increments = self.auto_increments.get(name, {})
for field in fields:
if not field.auto_increment:
continue
value = auto_increments.get(field.field_name, 1)
update_attr(object, field.field_name, value)
auto_increments[field.field_name] = value + 1
self.cache_object(object)
self.auto_increments[name] = auto_increments
self.to_update.add(name)
def update_object(self, object, attribute, old_value):
"""Update an object."""
self.check_update(object)
field = getattr(type(object), attribute)
self.update_cache(object, field, old_value)
name = get_name(type(object))
self.to_update.add(name)
def remove_object(self, object):
"""Delete the object."""
# Delete from cache only
self.uncache_object(object)
name = get_name(type(object))
self.to_update.add(name)
|
|
# License: BSD 3 clause
import numpy as np
from tick.hawkes.inference.base import LearnerHawkesNoParam
from tick.hawkes.inference.build.hawkes_inference import (HawkesBasisKernels as
_HawkesBasisKernels)
from tick.solver.base.utils import relative_distance
class HawkesBasisKernels(LearnerHawkesNoParam):
"""This class is used for performing non parametric estimation of
multi-dimensional Hawkes processes based on expectation maximization
algorithm and the hypothesis that kernels are linear
combinations of some basis kernels.
Hawkes processes are point processes defined by the intensity:
.. math::
\\forall i \\in [1 \\dots D], \\quad
\\lambda_i = \\mu_i + \\sum_{j=1}^D \\int \\phi_{ij} dN_j
where
* :math:`D` is the number of nodes
* :math:`\mu_i` are the baseline intensities
* :math:`\phi_{ij}` are the kernels
The basis kernel hypothesis translates to:
.. math::
\\phi_{ij}(t) = \\sum_{u}^U a_{ij}^u g^u(t)
where
* :math:`U` is the number of basis kernels.
* :math:`g^u` is a basis kernel
* :math:`a_{ij}^u` is the amplitude of basis kernel :math:`u` in kernel
:math:`\phi_{ij}`
Finally we also suppose that basis kernels :math:`g^u` are piecewise
constant on a given support and number of intervals.
Parameters
----------
kernel_support : `float`
The support size common to all the kernels.
n_basis : `int`, default=`None`
Number of non-parametric basis kernels to be used.
If `None` or 0, it will be set to `n_nodes`
kernel_size : `int`, default=10
Number of discretizations of the kernel
C : `float`, default=1e-1
The penalization parameter. It penalizes both the amplitudes
squared values and the basis kernels smoothness through the
integral of their squared derivative.
tol : `float`, default=1e-5
The tolerance of the solver (iterations stop when the stopping
criterion is below it). If not reached the solver does ``max_iter``
iterations
max_iter : `int`, default=100
Maximum number of iterations of the solver
verbose : `bool`, default=False
If `True`, the solver prints information about its progress; otherwise
it does not print anything (but records information in history anyway)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
record_every : `int`, default=10
Record history information when ``n_iter`` (iteration number) is
a multiple of ``record_every``
n_threads : `int`, default=1
Number of threads used for parallel computation.
* if `int <= 0`: the number of physical cores available on the CPU
* otherwise the desired number of threads
Other Parameters
----------------
ode_max_iter : `int`, default=100
Maximum number of loops of the inner ODE (ordinary differential equation)
algorithm.
ode_tol : `float`, default=1e-5
Tolerance of the loop of the inner ODE (ordinary differential equation)
algorithm.
Attributes
----------
n_nodes : `int`
Number of nodes of the estimated Hawkes process
n_realizations : `int`
Number of given realizations
baseline : `np.array` shape=(n_nodes)
The estimated baseline
amplitudes : `np.array` shape=(n_nodes, n_nodes, n_basis)
Amplitudes of all basis kernels for all kernels.
basis_kernels : `np.array` shape=(n_basis, kernel_size)
Estimated basis kernels
kernel_dt : `float`
Kernel discretization step. It is equal to
`kernel_support` / `kernel_size`
kernel_discretization : `np.ndarray`, shape=(kernel_size + 1, )
Kernel discretization points; they denote the intervals on which the
basis kernels are piecewise constant.
References
----------
Zhou, K., Zha, H. and Song, L., 2013, June. Learning Triggering Kernels for
Multi-dimensional Hawkes Processes. In `ICML (3) (pp. 1301-1309)`_.
Some rewriting notes for implementing the algorithm can be found in the
doc/tex directory.
.. _ICML (3) (pp. 1301-1309): http://jmlr.org/proceedings/papers/v28/zhou13.html
"""
_attrinfos = {
'baseline': {
'writable': False
},
'amplitudes': {
'writable': False
},
'basis_kernels': {
'writable': False
},
'_amplitudes_2d': {
'writable': False
},
}
def __init__(self, kernel_support, n_basis=None, kernel_size=10, tol=1e-5,
C=1e-1, max_iter=100, verbose=False, print_every=10,
record_every=10, n_threads=1, ode_max_iter=100, ode_tol=1e-5):
LearnerHawkesNoParam.__init__(self, max_iter=max_iter, verbose=verbose,
tol=tol, print_every=print_every,
record_every=record_every,
n_threads=n_threads)
self.ode_max_iter = ode_max_iter
self.ode_tol = ode_tol
alpha = 1. / C
if n_basis is None:
n_basis = 0
self._learner = _HawkesBasisKernels(kernel_support, kernel_size,
n_basis, alpha, n_threads)
self._amplitudes_2d = None
self.history.print_order = [
"n_iter", "rel_baseline", "rel_amplitudes", "rel_basis_kernels"
]
def fit(self, events, end_times=None, baseline_start=None,
amplitudes_start=None, basis_kernels_start=None):
"""Fit the model according to the given training data.
Parameters
----------
events : `list` of `list` of `np.ndarray`
List of Hawkes processes realizations.
Each realization of the Hawkes process is a list of n_nodes arrays,
one per component of the Hawkes process. Namely `events[i][j]` contains a
one-dimensional `numpy.array` of the events' timestamps of
component j of realization i.
If only one realization is given, it will be wrapped into a list
end_times : `np.ndarray` or `float`, default = None
List of end times of all Hawkes processes that will be given to the
model. If None, it will be set to each realization's latest time.
If only one realization is provided, then a float can be given.
baseline_start : `None` or `np.ndarray`, shape=(n_nodes)
Used to force start values for baseline attribute
If `None` starts with uniform 1 values
amplitudes_start : `None` or `np.ndarray`, shape=(n_nodes,n_nodes,D)
Used to force start values for amplitude parameter
If `None` starts with random values uniformly sampled between
0.5 and 0.9
basis_kernels_start : `None` or `np.ndarray`, shape=(D,kernel_size)
Used to force start values for the basis kernels
If `None` starts with random values uniformly sampled between
0 and 0.1
"""
LearnerHawkesNoParam.fit(self, events, end_times=end_times)
self.solve(baseline_start=baseline_start,
amplitudes_start=amplitudes_start,
basis_kernels_start=basis_kernels_start)
return self
def _solve(self, baseline_start=None, amplitudes_start=None,
basis_kernels_start=None):
"""Perform nonparametric estimation
Parameters
----------
baseline_start : `None` or `np.ndarray`, shape=(n_nodes)
Used to force start values for baseline attribute
If `None` starts with uniform 1 values
amplitudes_start : `None` or `np.ndarray`, shape=(n_nodes,n_nodes,D)
Used to force start values for amplitude parameter
If `None` starts with random values uniformly sampled between
0.5 and 0.9
basis_kernels_start : `None` or `np.ndarray`, shape=(D,kernel_size)
Used to force start values for the basis kernels
If `None` starts with random values uniformly sampled between
0 and 0.1
"""
if baseline_start is None:
self._set("baseline", np.zeros(self.n_nodes) + 1)
else:
self._set("baseline", baseline_start.copy())
if amplitudes_start is None:
self._set(
"amplitudes",
np.random.uniform(
0.5, 0.9, size=(self.n_nodes, self.n_nodes, self.n_basis)))
else:
self._set("amplitudes", amplitudes_start.copy())
if basis_kernels_start is None:
self._set(
"basis_kernels",
0.1 * np.random.uniform(size=(self.n_basis, self.kernel_size)))
else:
self._set("basis_kernels", basis_kernels_start.copy())
self._set(
'_amplitudes_2d',
self.amplitudes.reshape((self.n_nodes,
self.n_nodes * self.n_basis)))
for i in range(self.max_iter):
if self._should_record_iter(i):
prev_baseline = self.baseline.copy()
prev_amplitudes = self.amplitudes.copy()
prev_basis_kernels = self.basis_kernels.copy()
rel_ode = self._learner.solve(self.baseline, self.basis_kernels,
self._amplitudes_2d,
self.ode_max_iter, self.ode_tol)
if self._should_record_iter(i):
rel_baseline = relative_distance(self.baseline, prev_baseline)
rel_amplitudes = relative_distance(self.amplitudes,
prev_amplitudes)
rel_basis_kernels = relative_distance(self.basis_kernels,
prev_basis_kernels)
converged = max(rel_baseline, rel_amplitudes,
rel_basis_kernels) <= self.tol
force_print = (i + 1 == self.max_iter) or converged
self._handle_history(i + 1, rel_baseline=rel_baseline,
rel_amplitudes=rel_amplitudes,
rel_basis_kernels=rel_basis_kernels,
rel_ode=rel_ode, force=force_print)
if converged:
break
def get_kernel_supports(self):
return np.zeros((self.n_nodes, self.n_nodes)) + self.kernel_support
def get_kernel_values(self, i, j, abscissa_array):
"""Computes value of the specified kernel on given time values. This
makes our learner compliant with `tick.plot.plot_hawkes_kernels` API
Parameters
----------
i : `int`
First index of the kernel
j : `int`
Second index of the kernel
abscissa_array : `np.ndarray`, shape=(n_points, )
1d array containing all the times at which this kernel
computes its value
Returns
-------
output : `np.ndarray`, shape=(n_points, )
1d array containing the values of the specified kernel at the
given times.
"""
indices_in_support = (abscissa_array > 0) & \
(abscissa_array < self.kernel_support)
index = np.searchsorted(self.kernel_discretization,
abscissa_array[indices_in_support]) - 1
kernel_values = np.empty_like(abscissa_array)
kernel_values[np.invert(indices_in_support)] = 0
kernels_ij_sum = np.zeros(self.kernel_size)
for d in range(self.n_basis):
kernels_ij_sum += self.amplitudes[i, j, d] * self.basis_kernels[d]
kernel_values[indices_in_support] = kernels_ij_sum[index]
return kernel_values
def objective(self, coeffs, loss: float = None):
raise NotImplementedError()
@property
def kernel_support(self):
return self._learner.get_kernel_support()
@kernel_support.setter
def kernel_support(self, val):
self._learner.set_kernel_support(val)
@property
def kernel_size(self):
return self._learner.get_kernel_size()
@kernel_size.setter
def kernel_size(self, val):
self._learner.set_kernel_size(val)
@property
def n_basis(self):
return self._learner.get_n_basis()
@n_basis.setter
def n_basis(self, val):
if val is None:
val = 0
self._learner.set_n_basis(val)
@property
def C(self):
return 1. / self._learner.get_alpha()
@C.setter
def C(self, val):
self._learner.set_alpha(1. / val)
@property
def kernel_discretization(self):
return self._learner.get_kernel_discretization()
@property
def kernel_dt(self):
return self._learner.get_kernel_dt()
@kernel_dt.setter
def kernel_dt(self, val):
self._learner.set_kernel_dt(val)
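# Hedged usage sketch (not part of the library): fit the learner on one
# made-up realization with two nodes; timestamps and end time are arbitrary.
if __name__ == "__main__":
    events = [[np.array([1., 2.5, 4.1, 7.3]), np.array([0.5, 3.3, 6.2])]]
    learner = HawkesBasisKernels(kernel_support=4., n_basis=2, kernel_size=10,
                                 max_iter=20)
    learner.fit(events, end_times=10.)
    print(learner.baseline)              # shape (n_nodes,)
    print(learner.amplitudes.shape)      # (n_nodes, n_nodes, n_basis)
    print(learner.basis_kernels.shape)   # (n_basis, kernel_size)
    # get_kernel_values makes the learner usable with
    # tick.plot.plot_hawkes_kernels(learner) for visual inspection.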
|
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.nn import Parameter
from model import ConvInputModel, FCOutputModel, BasicModel, Highway
class RFES(BasicModel):
def __init__(self, args):
super(RFES, self).__init__(args, 'RFES')
self.debug = args.debug
self.conv = ConvInputModel()
# output is 24 channels in a 5x5 grid
self.coord_extra_len = args.coord_extra_len
# prepare coord tensor
def cvt_coord(idx):
i, j = idx // 5, idx % 5  # integer grid coordinates
if self.coord_extra_len==2:
return [(i-2)/2., (j-2)/2.]
if self.coord_extra_len==6:
return [
(i-2)/2., (i%2), (1. if (i>0) else 0.),
(j-2)/2., (j%2), (1. if (j>0) else 0.),
]
np_coord_tensor = np.zeros((args.batch_size, 25, self.coord_extra_len))
for idx in range(25):
np_coord_tensor[:,idx,:] = np.array( cvt_coord(idx) )
coord_tensor = torch.FloatTensor(args.batch_size, 25, self.coord_extra_len)
if args.cuda:
coord_tensor = coord_tensor.cuda()
self.coord_tensor = Variable(coord_tensor)
self.coord_tensor.data.copy_(torch.from_numpy(np_coord_tensor))
self.question_size = 11
self.answer_size = 10
self.rnn_hidden_size = args.rnn_hidden_size # must be > question_size and answer_size
# 24+self.coord_extra_len+self.coord_extra_len = key_size + value_size
if self.coord_extra_len==2:
self.key_size = self.query_size = 12
self.value_size = 16
else:
self.key_size = self.query_size = 20
self.value_size = 16
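# Size bookkeeping, worked out for clarity: each grid point carries the
# 24 CNN channels plus the coordinate encoding counted twice (keys and
# values), and that total must equal key_size + value_size:
#   coord_extra_len=2: 24 + 2 + 2 = 28 = 12 (key) + 16 (value)
#   coord_extra_len=6: 24 + 6 + 6 = 36 = 20 (key) + 16 (value)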
self.process_coords = args.process_coords
if self.process_coords:
print("Create additional 1x1 convolutions to process coords additionally per point")
self.coord_tensor_permuted = self.coord_tensor.permute(0,2,1)
d_in, d_out = 24+self.coord_extra_len, self.key_size+self.value_size
#print(d_in, d_out)
if not (d_out == 24+self.coord_extra_len+self.coord_extra_len):
print("Sizing of coordinate-enhanced 5x5 images does not match additional conv layers")
exit(1)
# These are 1d convs (since only 1x1 kernels anyway, and better shapes for below...)
self.conv1 = nn.Conv1d(d_in, d_in, kernel_size=1, padding=0)
self.batchNorm1 = nn.BatchNorm1d(d_in) # d_hidden==d_in here; 1d norm since the input is (batch, channels, d*d)
self.conv2 = nn.Conv1d(d_in, d_out, kernel_size=1, padding=0)
self.batchNorm2 = nn.BatchNorm1d(d_out)
k_blank = torch.randn( (1, 1, self.key_size) )
if args.cuda:
k_blank = k_blank.cuda()
self.k_blank = Parameter(k_blank, requires_grad=True)
v_blank = torch.zeros( (1, 1, self.value_size) )
if args.cuda:
v_blank = v_blank.cuda()
self.v_blank = Variable(v_blank, requires_grad=False) # This is just fixed at ==0 == 'STOP'
#seq_len=8
#seq_len=4
#seq_len=2 # Works well enough to be on a par with RN
#seq_len=1
self.seq_len = args.seq_len
ent_stream_rnn1_hidden_pad = torch.randn( (1, self.rnn_hidden_size-self.question_size) )
if args.cuda:
ent_stream_rnn1_hidden_pad = ent_stream_rnn1_hidden_pad.cuda()
self.ent_stream_rnn1_hidden_pad = Parameter(ent_stream_rnn1_hidden_pad, requires_grad=True)
#print("ent_stream_rnn1_hidden_pad.size() : ", self.ent_stream_rnn1_hidden_pad.size()) # (5)
ent_stream_rnn1_start = torch.randn( (1, self.value_size) )
if args.cuda:
ent_stream_rnn1_start = ent_stream_rnn1_start.cuda()
self.ent_stream_rnn1_start = Parameter(ent_stream_rnn1_start, requires_grad=True)
self.ent_stream_rnn1 = nn.GRUCell(self.value_size, self.rnn_hidden_size) #input_size, hidden_size, bias=True)
ent_stream_rnn2_hidden = torch.randn( (1, self.rnn_hidden_size) )
if args.cuda:
ent_stream_rnn2_hidden = ent_stream_rnn2_hidden.cuda()
self.ent_stream_rnn2_hidden = Parameter(ent_stream_rnn2_hidden, requires_grad=True)
self.ent_stream_rnn2 = nn.GRUCell(self.rnn_hidden_size, self.rnn_hidden_size) #input_size, hidden_size, bias=True)
self.stream_rnn_to_query = nn.Linear(self.rnn_hidden_size, self.query_size)
self.highway=args.highway
if self.highway==1:
#self.stream_rnn_switcher = nn.Linear(self.rnn_hidden_size, self.rnn_hidden_size)
#self.stream_rnn_extra = nn.Linear(self.rnn_hidden_size, self.rnn_hidden_size)
# Highway(input_size, num_layers, f=torch.nn.functional.relu)
self.stream_rnn_highway = Highway(self.rnn_hidden_size, 1, f=F.relu)
# No parameters needed for softmax attention...
# Temperature for Gumbel?
stream_question_hidden_pad = torch.randn( (1, self.rnn_hidden_size-self.question_size) )
if args.cuda:
stream_question_hidden_pad = stream_question_hidden_pad.cuda()
self.stream_question_hidden_pad = Parameter(stream_question_hidden_pad, requires_grad=True)
self.stream_question_rnn = nn.GRUCell(self.value_size, self.rnn_hidden_size)
stream_answer_hidden = torch.randn( (1, self.rnn_hidden_size) )
if args.cuda:
stream_answer_hidden = stream_answer_hidden.cuda()
self.stream_answer_hidden = Parameter(stream_answer_hidden, requires_grad=True)
self.stream_answer_rnn = nn.GRUCell(self.rnn_hidden_size, self.rnn_hidden_size)
self.stream_answer_to_output = nn.Linear(self.rnn_hidden_size, self.answer_size)
#for param in self.parameters():
# print(type(param.data), param.size())
self.optimizer = optim.Adam(self.parameters(), lr=args.lr)
def forward(self, img, qst):
x = self.conv(img) ## x = (64 x 24 x 5 x 5) = (batch#, channels, x-s, y-s)
"""g"""
batch_size = x.size()[0] # minibatch
n_channels = x.size()[1] # output features of CNN (24 normally or 28 if process_coords)
d = x.size()[2] # grid size over image
if self.process_coords:
# Add in the coordinates here...
#print("process_coords : x_from-cnn.size(): ", x.size())
x_flatter = x.view(batch_size, n_channels, d*d)
#print("x_flatter.size(): ", x_flatter.size())
#print("coord_tensor.size(): ", self.coord_tensor.size())
#print("coord_tensor.permuted.size(): ", self.coord_tensor.permute(0,2,1).size())
#print("coord_tensor_permuted.size(): ", self.coord_tensor_permuted.size())
#x_plus = torch.cat([x_flatter, self.coord_tensor.permute(0,2,1) ], 1)
x_plus = torch.cat([x_flatter, self.coord_tensor_permuted ], 1)
#print("x_plus.size(): ", x_plus.size())
x = self.conv1(x_plus)
x = F.relu(x)
x = self.batchNorm1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.batchNorm2(x)
#print("x_after-1x1s.size(): ", x.size()) # 32,28,25
x_flat = x.view(batch_size, self.key_size+self.value_size, d*d).permute(0,2,1)
# x_flat = (64 x 25 x 28)
ks_image = x_flat.narrow(2, 0, self.key_size)
vs_image = x_flat.narrow(2, self.key_size, self.value_size)
else:
#print("Just concat coordinates : x_from-cnn.size(): ", x.size())
x_flat = x.view(batch_size, n_channels, d*d).permute(0,2,1)
# x_flat = (64 x 25 x 24)
ks_nocoords = x_flat.narrow(2, 0, self.key_size-self.coord_extra_len)
vs_nocoords = x_flat.narrow(2, self.key_size-self.coord_extra_len, self.value_size-self.coord_extra_len)
# add coordinates (since these haven't been included yet)
ks_image = torch.cat([ks_nocoords, self.coord_tensor], 2)
vs_image = torch.cat([vs_nocoords, self.coord_tensor], 2)
#print("ks_image.size() : ", ks_image.size()) # (32,25,12)
#print("vs_image.size() : ", vs_image.size()) # (32,25,16)
# add the 'end of choices' element
#print("self.k_blank.size() : ", self.k_blank.size()) # (1,1,12)
#print("self.k_blank.expand().size() : ", self.k_blank.expand( (batch_size, 1, self.key_size) ).size() ) # (32,1,12)
ks = torch.cat([ks_image, self.k_blank.expand( (batch_size, 1, self.key_size) )], 1)
#print("ks.size() : ", ks.size()) # (32,26,12)
vs = torch.cat([vs_image, self.v_blank.expand( (batch_size, 1, self.value_size) )], 1)
#print("vs.size() : ", vs.size()) # (32,26,16)
#print("qst.size() : ", qst.size()) # (32,11)
seq_len = self.seq_len
ent_stream_rnn1_hidden = torch.cat(
[qst, self.ent_stream_rnn1_hidden_pad.expand( (batch_size, self.rnn_hidden_size-self.question_size) )], 1)
#print("ent_stream_rnn_hidden.size() : ", ent_stream_rnn_hidden.size()) # (32,16)
ent_stream_rnn1_input = self.ent_stream_rnn1_start.expand( (batch_size, self.value_size) )
ent_stream_rnn2_hidden = self.ent_stream_rnn2_hidden.expand( (batch_size, self.rnn_hidden_size) )
stream_logits, ent_similarities, ent_weights_arr, stream_values = [],[],[],[] # Will be filled by RNN and attention process
for i in range(seq_len):
#print("ent_stream_rnn_input.size() : ", ent_stream_rnn_input.size()) # (32,16)
#print("ent_stream_rnn_hidden.size() : ", ent_stream_rnn_hidden.size()) # (32,16)
ent_stream_rnn1_hidden = self.ent_stream_rnn1(ent_stream_rnn1_input, ent_stream_rnn1_hidden)
ent_stream_rnn2_hidden = self.ent_stream_rnn2(ent_stream_rnn1_hidden, ent_stream_rnn2_hidden)
# Works a tiny bit better than without
#ent_stream_rnn2_hidden = F.relu(ent_stream_rnn2_hidden)
if self.highway==1:
#ent_stream_rnn2_hidden_save = ent_stream_rnn2_hidden
#ent_stream_rnn2_hidden = ent_stream_rnn2_hidden_save
# this seems to get stuck...
ent_stream_rnn2_hidden = self.stream_rnn_highway(ent_stream_rnn2_hidden)
# Try this after highway, rather than before
ent_stream_rnn2_hidden = F.relu(ent_stream_rnn2_hidden)
ent_stream_logits = ent_stream_rnn2_hidden
if self.debug:
stream_logits.append( ent_stream_logits )
# Convert the ent_stream hidden layer to a query via a Linear unit
qs = self.stream_rnn_to_query( ent_stream_logits )
#print("qs.size() : ", qs.size()) # (32,12)
#print("qs.unsqueeze(2).size() : ", torch.unsqueeze(qs, 2).size()) # (32,12,1)
# Now do the dot-product with the keys (flattened image-like)
ent_similarity = torch.bmm( ks, torch.unsqueeze(qs, 2) )
#print("ent_similarity.size() : ", ent_similarity.size()) # (32,26,1)
if self.debug:
ent_similarities.append( torch.squeeze( ent_similarity) )
if True:
# Softmax to get the weights
#ent_weights = torch.nn.Softmax()( torch.squeeze( ent_similarity) ) #WORKED
ent_weights = F.softmax( torch.squeeze( ent_similarity) )
if False:
# Gumbel-Softmax to get the weights:
ent_weights = gumbel_softmax_sample( torch.squeeze( ent_similarity), temperature=0.2 )
#print("ent_weights.size() : ", ent_weights.size()) # (32,26)
#print("ent_weights.unsqueeze(2).size() : ", torch.unsqueeze(ent_weights,2).size()) # (32,26,1)
#print("ent_weights.unsqueeze(1).size() : ", torch.unsqueeze(ent_weights,1).size()) # (32,1,26)
if self.debug:
ent_weights_arr.append( ent_weights )
# Now multiply through to get the resulting values
stream_next_value = torch.squeeze( torch.bmm( torch.unsqueeze(ent_weights,1), vs ) )
#print("stream_next_value.size() : ", stream_next_value.size()) # (32, 16)
stream_values.append(stream_next_value)
ent_stream_rnn1_input = stream_next_value
# Now interpret the values from the stream
stream_question_hidden = torch.cat(
[qst, self.stream_question_hidden_pad.expand( (batch_size, self.rnn_hidden_size-self.question_size) )], 1)
stream_answer_hidden = self.stream_answer_hidden.expand( (batch_size, self.rnn_hidden_size) )
#print("stream_answer_hidden0", stream_answer_hidden)
stream_answer_hidden_arr = []
for stream_question_rnn_input in stream_values:
#print("stream_question_rnn_input.size() : ", stream_question_rnn_input.size()) # (32,16)
#print("stream_question_hidden.size() : ", stream_question_hidden.size()) # (32,16)
stream_question_hidden = self.stream_question_rnn(stream_question_rnn_input, stream_question_hidden)
#print("stream_question_hidden.size() : ", stream_question_hidden.size()) # (32,16)
#print("stream_answer_hidden.size() : ", stream_answer_hidden.size()) # (32,16)
stream_answer_hidden = self.stream_answer_rnn(stream_question_hidden, stream_answer_hidden)
#print("stream_answer_hidden", stream_answer_hidden)
stream_answer_hidden_arr.append( stream_answer_hidden )
# Final answer is in stream_answer_hidden (final value)
#ans = stream_answer_hidden.narrow(1, 0, self.answer_size) # No: Let's do a final linear on it...
#print("ans.size() : ", ans.size()) # (32,10)
if self.highway==2: # [][32batch, 32hidden]
stream_answer_hidden_max = torch.cat( stream_answer_hidden_arr, 1)
#print("stream_answer_hidden_max.size() : ", stream_answer_hidden_max.size()) # (32,32)
#ans = self.stream_answer_to_output( )
ans = self.stream_answer_to_output( stream_answer_hidden ) # Temp
else:
ans = self.stream_answer_to_output( stream_answer_hidden )
if self.debug:
self.stream_logits = stream_logits
self.ent_similarities = ent_similarities
self.ent_weights_arr = ent_weights_arr
self.stream_values = stream_values
self.ans_logits = ans
return F.log_softmax(ans) # log_softmax is what's expected
# https://www.reddit.com/r/MachineLearning/comments/6d44i7/d_how_to_use_gumbelsoftmax_for_policy_gradient/
# The gumbel-softmax is for a more specific case that being able to approximate a gradient
# for any non-differentiable function. Softmax is exactly what is says on the tin; a soft-max.
# The max function is not differentiable but is often used to sample from a distribution
# by taking the highest probability. The softmax can be used to approximate the max function
# and is differentiable. So what you can do is take the max in the forward pass but use softmax
# during the backward pass in order to be able to pass gradients though it.
# You can then anneal the softmax function temperature so that the approximation gets closer and closer
# to the true max function during training to lower the error in the approximation.
# Blog post :
# http://blog.evjang.com/2016/11/tutorial-categorical-variational.html
# TF code : https://gist.github.com/ericjang/1001afd374c2c3b7752545ce6d9ed349
# Keras notebook version : https://github.com/EderSantana/gumbel
# Theano / Lasagne : https://github.com/yandexdataschool/gumbel_lstm
# For the 'hard' version, plain argmax is used (at https://github.com/yandexdataschool/gumbel_lstm/blob/master/gumbel_softmax.py#L81)
# afaik, unlike max, argmax (index of maximum) will have zero/NA gradient by definition
# since infinitely small changes in the vector won't change index of the maximum unless there are two exactly equal elements.
# From : https://github.com/pytorch/pytorch/issues/639
#def gumbel_sampler(input, tau, temperature):
# noise = torch.rand(input.size())
# noise.add_(1e-9).log_().neg_()
# noise.add_(1e-9).log_().neg_()
# noise = Variable(noise)
# x = (input + noise) / tau + temperature
# x = F.softmax(x.view(input.size(0), -1))
# return x.view_as(input)
# From : https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530
def sample_gumbel(input):
noise = torch.rand(input.size())
eps = 1e-20
noise.add_(eps).log_().neg_()
noise.add_(eps).log_().neg_()
res = Variable(noise)
if input.is_cuda:
res = res.cuda()
return res
def gumbel_softmax_sample(input, temperature=0.5):
noise = sample_gumbel(input)
x = (input + noise) / temperature
#x = F.log_softmax(x)
x = F.softmax(x)
return x.view_as(input)
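# A hedged sketch (not used by the models above) of the straight-through trick
# described in the notes below: take the hard argmax in the forward pass while
# letting gradients flow through the temperature-scaled softmax.
def hard_softmax_straight_through(logits, temperature=0.5):
    soft = F.softmax(logits / temperature, dim=1)
    index = soft.max(1, keepdim=True)[1]                    # argmax per row
    hard = torch.zeros_like(soft).scatter_(1, index, 1.0)   # one-hot
    # Forward value is the one-hot `hard`; backward gradient is that of `soft`.
    return (hard - soft).detach() + soft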
class Harden(nn.Module):
# https://discuss.pytorch.org/t/cannot-override-torch-round-after-upgrading-to-the-latest-pytorch-version/6396 ?
def __init__(self, args):
super(Harden, self).__init__()
#self.y_onehot = torch.FloatTensor(args.batch_size, args.input_len)
#self.batch_size = args.batch_size
# https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/4
# https://discuss.pytorch.org/t/creating-one-hot-vector-from-indices-given-as-a-tensor/2171/3
# https://github.com/mrdrozdov-github/pytorch-extras#one_hot
def forward(self, vec):
#self.y_onehot.zero_()
#self.y_onehot.scatter_(1, vec, 1)
#return self.y_onehot
values, indices = vec.max(1)
y_onehot = torch.FloatTensor( vec.size() )
if vec.is_cuda:
y_onehot = y_onehot.cuda()
y_onehot.zero_()
y_onehot.scatter_(1, indices, 1)
return y_onehot
def backward(self, grads):
return grads # This is an identity pass-through
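# Note: overriding `backward` on an nn.Module does not plug into autograd;
# to really pass gradients straight through, this would have to be a
# torch.autograd.Function (compare the commented-out version below).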
# https://github.com/jcjohnson/pytorch-examples
#class Harden(torch.autograd.Function):
# """
# We can implement our own custom autograd Functions by subclassing
# torch.autograd.Function and implementing the forward and backward passes
# which operate on Tensors.
# """
# def forward(self, input):
# """
# In the forward pass we receive a Tensor containing the input and return a
# Tensor containing the output. You can cache arbitrary Tensors for use in the
# backward pass using the save_for_backward method.
# """
# self.save_for_backward(input)
# return input.clamp(min=0)
#
# def backward(self, grad_output):
# """
# In the backward pass we receive a Tensor containing the gradient of the loss
# with respect to the output, and we need to compute the gradient of the loss
# with respect to the input.
# """
# input, = self.saved_tensors
# grad_input = grad_output.clone()
# grad_input[input < 0] = 0
# return grad_input
class RFESH(BasicModel):
def __init__(self, args):
super(RFESH, self).__init__(args, 'RFESH')
self.debug = args.debug
dtype = args.dtype
self.conv = ConvInputModel()
# output is 24 channels in a 5x5 grid
self.coord_extra_len = args.coord_extra_len
# prepare coord tensor
def cvt_coord(idx):
i, j = idx // 5, idx % 5  # integer grid coordinates
if self.coord_extra_len==2:
return [(i-2)/2., (j-2)/2.]
if self.coord_extra_len==6:
return [
(i-2)/2., (i%2), (1. if (i>0) else 0.),
(j-2)/2., (j%2), (1. if (j>0) else 0.),
]
np_coord_tensor = np.zeros((args.batch_size, 25, self.coord_extra_len))
for idx in range(25):
np_coord_tensor[:,idx,:] = np.array( cvt_coord(idx) ) / 10.
self.coord_tensor = Variable( torch.FloatTensor(args.batch_size, 25, self.coord_extra_len).type(dtype) )
self.coord_tensor.data.copy_(torch.from_numpy(np_coord_tensor))
self.question_size = 11
self.answer_size = 10
self.rnn_hidden_size = args.rnn_hidden_size # must be > question_size and answer_size
# 24+self.coord_extra_len+self.coord_extra_len = key_size + value_size
if self.coord_extra_len==2:
self.key_size = self.query_size = 10+2
self.value_size = 14+2
else: # coord_extra_len likely to be 6...
self.key_size = self.query_size = 14+6
self.value_size = 10+6
self.process_coords = args.process_coords
if self.process_coords:
print("Create additional 1x1 convolutions to process coords additionally per point")
self.coord_tensor_permuted = self.coord_tensor.permute(0,2,1)
d_in, d_out = 24+self.coord_extra_len, self.key_size+self.value_size
#print(d_in, d_out)
if not (d_out == 24+self.coord_extra_len+self.coord_extra_len):
print("Sizing of coordinate-enhanced 5x5 images does not match additional conv layers")
exit(1)
# These are 1d convs (since only 1x1 kernels anyway, and better shapes for below...)
self.conv1 = nn.Conv1d(d_in, d_in, kernel_size=1, padding=0)
self.batchNorm1 = nn.BatchNorm1d(d_in) # d_hidden==d_in here; 1d norm since the input is (batch, channels, d*d)
self.conv2 = nn.Conv1d(d_in, d_out, kernel_size=1, padding=0)
self.batchNorm2 = nn.BatchNorm1d(d_out)
k_blank = torch.randn( (1, 1, self.key_size) ).type(dtype)
self.k_blank = Parameter(k_blank, requires_grad=True)
v_blank = torch.zeros( (1, 1, self.value_size) ).type(dtype)
self.v_blank = Variable(v_blank, requires_grad=False) # This is just fixed at ==0 == 'STOP'
#seq_len=8
#seq_len=4
#seq_len=2 # Works well enough to be on a par with RN
#seq_len=1
self.seq_len = args.seq_len
ent_stream_rnn1_hidden_pad = torch.randn( (1, self.rnn_hidden_size-self.question_size) ).type(dtype)
self.ent_stream_rnn1_hidden_pad = Parameter(ent_stream_rnn1_hidden_pad, requires_grad=True)
#print("ent_stream_rnn1_hidden_pad.size() : ", self.ent_stream_rnn1_hidden_pad.size()) # (5)
ent_stream_rnn1_start = torch.randn( (1, self.value_size) ).type(dtype)
self.ent_stream_rnn1_start = Parameter(ent_stream_rnn1_start, requires_grad=True)
self.ent_stream_rnn1 = nn.GRUCell(self.value_size, self.rnn_hidden_size) #input_size, hidden_size, bias=True)
ent_stream_rnn2_hidden = torch.randn( (1, self.rnn_hidden_size) ).type(dtype)
self.ent_stream_rnn2_hidden = Parameter(ent_stream_rnn2_hidden, requires_grad=True)
self.ent_stream_rnn2 = nn.GRUCell(self.rnn_hidden_size, self.rnn_hidden_size) #input_size, hidden_size, bias=True)
self.stream_rnn_to_query = nn.Linear(self.rnn_hidden_size, self.query_size)
self.highway=args.highway
if self.highway==1:
#self.stream_rnn_switcher = nn.Linear(self.rnn_hidden_size, self.rnn_hidden_size)
#self.stream_rnn_extra = nn.Linear(self.rnn_hidden_size, self.rnn_hidden_size)
# Highway(input_size, num_layers, f=torch.nn.functional.relu)
self.stream_rnn_highway = Highway(self.rnn_hidden_size, 1, f=F.relu)
# No parameters needed for softmax attention...
# Temperature for Gumbel?
stream_question_hidden_pad = torch.randn( (1, self.rnn_hidden_size-self.question_size) ).type(dtype)
self.stream_question_hidden_pad = Parameter(stream_question_hidden_pad, requires_grad=True)
self.stream_question_rnn = nn.GRUCell(self.value_size, self.rnn_hidden_size)
stream_answer_hidden = torch.randn( (1, self.rnn_hidden_size) ).type(dtype)
self.stream_answer_hidden = Parameter(stream_answer_hidden, requires_grad=True)
self.stream_answer_rnn = nn.GRUCell(self.rnn_hidden_size, self.rnn_hidden_size)
self.stream_answer_to_output = nn.Linear(self.rnn_hidden_size, self.answer_size)
#for param in self.parameters():
# print(type(param.data), param.size())
self.optimizer = optim.Adam(self.parameters(), lr=args.lr)
if False:
self.question_to_query_1 = nn.Linear(self.question_size, self.rnn_hidden_size)
self.question_to_query_2 = nn.Linear(self.rnn_hidden_size, self.query_size)
def forward(self, img, qst):
x = self.conv(img) ## x = (64 x 24 x 5 x 5) = (batch#, channels, x-s, y-s)
"""g"""
batch_size = x.size()[0] # minibatch
n_channels = x.size()[1] # output features of CNN (24 normally or 28 if process_coords)
d = x.size()[2] # grid size over image
if self.process_coords:
# Add in the coordinates here...
#print("process_coords : x_from-cnn.size(): ", x.size())
x_flatter = x.view(batch_size, n_channels, d*d)
#print("x_flatter.size(): ", x_flatter.size())
#print("coord_tensor.size(): ", self.coord_tensor.size())
#print("coord_tensor.permuted.size(): ", self.coord_tensor.permute(0,2,1).size())
#print("coord_tensor_permuted.size(): ", self.coord_tensor_permuted.size())
#x_plus = torch.cat([x_flatter, self.coord_tensor.permute(0,2,1) ], 1)
x_plus = torch.cat([x_flatter, self.coord_tensor_permuted ], 1)
#print("x_plus.size(): ", x_plus.size())
x = self.conv1(x_plus)
x = F.relu(x)
x = self.batchNorm1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.batchNorm2(x)
#print("x_after-1x1s.size(): ", x.size()) # 32,28,25
x_flat = x.view(batch_size, self.key_size+self.value_size, d*d).permute(0,2,1)
# x_flat = (64 x 25 x 28)
ks_image = x_flat.narrow(2, 0, self.key_size)
vs_image = x_flat.narrow(2, self.key_size, self.value_size)
else:
#print("Just concat coordinates : x_from-cnn.size(): ", x.size())
x_flat = x.view(batch_size, n_channels, d*d).permute(0,2,1)
# x_flat = (64 x 25 x 24)
ks_nocoords = x_flat.narrow(2, 0, self.key_size-self.coord_extra_len)
vs_nocoords = x_flat.narrow(2, self.key_size-self.coord_extra_len, self.value_size-self.coord_extra_len)
# add coordinates (since these haven't been included yet)
ks_image = torch.cat([ks_nocoords, self.coord_tensor], 2)
vs_image = torch.cat([vs_nocoords, self.coord_tensor], 2)
#print("ks_image.size() : ", ks_image.size()) # (32,25,12)
#print("vs_image.size() : ", vs_image.size()) # (32,25,16)
# add the 'end of choices' element
#print("self.k_blank.size() : ", self.k_blank.size()) # (1,1,12)
#print("self.k_blank.expand().size() : ", self.k_blank.expand( (batch_size, 1, self.key_size) ).size() ) # (32,1,12)
ks = torch.cat([ks_image, self.k_blank.expand( (batch_size, 1, self.key_size) )], 1)
#print("ks.size() : ", ks.size()) # (32,26,12)
vs = torch.cat([vs_image, self.v_blank.expand( (batch_size, 1, self.value_size) )], 1)
#print("vs.size() : ", vs.size()) # (32,26,16)
#print("qst.size() : ", qst.size()) # (32,11)
seq_len = self.seq_len
ent_stream_rnn1_hidden = torch.cat(
[qst, self.ent_stream_rnn1_hidden_pad.expand( (batch_size, self.rnn_hidden_size-self.question_size) )], 1)
#print("ent_stream_rnn_hidden.size() : ", ent_stream_rnn_hidden.size()) # (32,16)
ent_stream_rnn1_input = self.ent_stream_rnn1_start.expand( (batch_size, self.value_size) )
ent_stream_rnn2_hidden = self.ent_stream_rnn2_hidden.expand( (batch_size, self.rnn_hidden_size) )
stream_logits, ent_similarities, ent_weights_arr, stream_values = [],[],[],[] # Will be filled by RNN and attention process
for i in range(seq_len): # HUGE CHANGE
#if False:
#print("ent_stream_rnn_input.size() : ", ent_stream_rnn_input.size()) # (32,16)
#print("ent_stream_rnn_hidden.size() : ", ent_stream_rnn_hidden.size()) # (32,16)
ent_stream_rnn1_hidden = self.ent_stream_rnn1(ent_stream_rnn1_input, ent_stream_rnn1_hidden)
ent_stream_rnn2_hidden = self.ent_stream_rnn2(ent_stream_rnn1_hidden, ent_stream_rnn2_hidden)
# Works a tiny bit better than without
#ent_stream_rnn2_hidden = F.relu(ent_stream_rnn2_hidden)
if self.highway==1:
#ent_stream_rnn2_hidden_save = ent_stream_rnn2_hidden
#ent_stream_rnn2_hidden = ent_stream_rnn2_hidden_save
# this seems to get stuck...
ent_stream_rnn2_hidden = self.stream_rnn_highway(ent_stream_rnn2_hidden)
# Try this after highway, rather than before
ent_stream_rnn2_hidden = F.relu(ent_stream_rnn2_hidden)
ent_stream_logits = ent_stream_rnn2_hidden
if self.debug:
stream_logits.append( ent_stream_logits )
# Convert the ent_stream hidden layer to a query via a Linear unit
qs = self.stream_rnn_to_query( ent_stream_logits )
#print("qs.size() : ", qs.size()) # (32,12)
#print("qs.unsqueeze(2).size() : ", torch.unsqueeze(qs, 2).size()) # (32,12,1)
# Now do the dot-product with the keys (flattened image-like)
ent_similarity = torch.bmm( ks, torch.unsqueeze(qs, 2) )
#print("ent_similarity.size() : ", ent_similarity.size()) # (32,26,1)
ent_logits = torch.squeeze( ent_similarity )
# These are zero-centered, but not variance squashed
#ent_logits = ent_logits - torch.mean( ent_logits, 1, keepdim=True)
if self.debug:
ent_similarities.append( ent_logits )
#if True:
# # Softmax to get the weights
# #ent_weights = torch.nn.Softmax()( torch.squeeze( ent_similarity) ) #WORKED
# ent_weights = F.softmax( torch.squeeze( ent_similarity) )
#
#if False:
# # Gumbel-Softmax to get the weights:
# ent_weights = gumbel_softmax_sample( torch.squeeze( ent_similarity), temperature=0.2 )
#print("ent_weights.size() : ", ent_weights.size()) # (32,26)
#print("ent_weights.unsqueeze(2).size() : ", torch.unsqueeze(ent_weights,2).size()) # (32,26,1)
#print("ent_weights.unsqueeze(1).size() : ", torch.unsqueeze(ent_weights,1).size()) # (32,1,26)
# ent_weights is like 'actions' derived from the 'soft' ent_logits (see Minimal-Soft-vs-Hard-Max notebook)
adjusted_actions = ent_logits.clone()
if self.training:
gumbel = sample_gumbel( ent_logits )
adjusted_actions += gumbel * 1.0
#adjusted_actions += gumbel * 0.5
#adjusted_actions += gumbel * 2.0
else:
action_max, action_max_idx = torch.max(adjusted_actions, 1, keepdim=True)
adjusted_actions[:,:] = 0.
adjusted_actions.scatter_(1, action_max_idx, 5.0) # This just puts a spike at the argmax (no need for differentiability)
if True: # 'plain'
action_weights = adjusted_actions #*2.0
if False:
action_max, action_max_idx = torch.max(adjusted_actions, 1, keepdim=True)
if False:
# This has a min of zero, which leads to the possibility of a 'near-zero everywhere' choice for the max
action_weights = ent_logits.clone() # Just to get the shape
action_weights[:,:] = 0.
action_weights.scatter_(1, action_max_idx, action_max+5.0) # Force e^5 extra emphasis
if False:
action_min, action_min_idx = torch.min(adjusted_actions, 1, keepdim=True)
# Enforce the min to be everywhere, so the max 'sticks out' more
#action_weights = action_min.expand( (batch_size, vs.size()[1]) ).clone()
action_weights = action_min.expand( (batch_size, self.value_size) ).clone()
#print(action_weights.size(), action_min.size())
action_weights.scatter_(1, action_max_idx, action_max)
ent_weights = F.softmax( action_weights )
#print(ent_weights)
if self.debug:
ent_weights_arr.append( ent_weights )
# Now multiply through to get the resulting values
stream_next_value = torch.squeeze( torch.bmm( torch.unsqueeze(ent_weights,1), vs ) )
#print("stream_next_value.size() : ", stream_next_value.size()) # (32, 16)
stream_values.append(stream_next_value)
ent_stream_rnn1_input = stream_next_value
### Entity stream now in stream_values[] as a list of vectors of length self.value_size
# HUGE CHANGE END
if False: # HUGE CHANGE ALTERNATIVE
# Convert the question to something like a query
# Dot the query with all the ks
# Find the list of all the best k_indexes
# Convert those k_indexes to vs (pushing them onto stream_values[], initialized to be empty)
hid1 = self.question_to_query_1( qst )
hid1_out = F.relu( hid1 )
qs = self.question_to_query_2( hid1_out )
ent_similarity = torch.bmm( ks, torch.unsqueeze(qs, 2) ) # batch_size, 26, 1
ent_logits = torch.squeeze( ent_similarity ) # batch_size, 26
#print("ent_logits.size() : ", ent_logits.size()) # batch_size, 26
# torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
query_matches_mat, query_matches_idx = torch.topk(ent_logits, seq_len)
#print("query_matches_idx.size() : ", query_matches_idx.size()) # batch_size, seq_len
#print(query_matches_idx[0]) # Numbers in range [0, 25+1)
#print("vs.size() : ", vs.size()) # batch_size, 26, 16
#vs_at_idxs = torch.gather( vs, 1, query_matches_idx )
#print("vs_at_idxs.size() : ", vs_at_idxs.size()) #
stream_values=[]
view_unrolled = torch.arange(0, batch_size*26, 26).type(torch.LongTensor)
#print("view_unrolled", view_unrolled)
for i in range(seq_len):
idxs = query_matches_idx[:, i] # every index across the batch
#print("vs.size() : ", vs.size()) # batch_size, 26, value_size
#print("idxs.size() : ", idxs.size()) # batch_size
#print("idxs : ", idxs) # torch.cuda.LongTensor
#vs_at_idxs = vs[:, idxs.cpu(), :] #?? Fails
#vs_at_idxs = torch.index_select( vs, 1, idxs )
#print("vs_at_idxs.size() : ", vs_at_idxs.size()) # batch_size, vs_size #[32, 32, 16]??
# Test idea : b = torch.Tensor([[[1,101],[2,102],[12,112]],[[3,103],[4,104],[34,134]],[[5,105],[6,106],[56,156]]])
idxs_unrolled = torch.add(idxs, view_unrolled) # .cuda()
print("idxs_unrolled", idxs_unrolled)
vs_at_idxs = torch.index_select( vs.view(batch_size*26, self.value_size), 1, idx_unrolled )
#print("vs_at_idxs.size() : ", vs_at_idxs.size()) # batch_size, vs_size
stream_values.append( vs_at_idxs )
# HUGE CHANGE ALTERNATIVE END
# Now interpret the values from the stream
stream_question_hidden = torch.cat(
[qst, self.stream_question_hidden_pad.expand( (batch_size, self.rnn_hidden_size-self.question_size) )], 1)
stream_answer_hidden = self.stream_answer_hidden.expand( (batch_size, self.rnn_hidden_size) )
#print("stream_answer_hidden0", stream_answer_hidden)
stream_answer_hidden_arr = []
for stream_question_rnn_input in stream_values:
#print("stream_question_rnn_input.size() : ", stream_question_rnn_input.size()) # (32,16)
#print("stream_question_hidden.size() : ", stream_question_hidden.size()) # (32,16)
stream_question_hidden = self.stream_question_rnn(stream_question_rnn_input, stream_question_hidden)
#print("stream_question_hidden.size() : ", stream_question_hidden.size()) # (32,16)
#print("stream_answer_hidden.size() : ", stream_answer_hidden.size()) # (32,16)
stream_answer_hidden = self.stream_answer_rnn(stream_question_hidden, stream_answer_hidden)
#print("stream_answer_hidden", stream_answer_hidden)
stream_answer_hidden_arr.append( stream_answer_hidden )
# Final answer is in stream_answer_hidden (final value)
#ans = stream_answer_hidden.narrow(1, 0, self.answer_size) # No: Let's do a final linear on it...
#print("ans.size() : ", ans.size()) # (32,10)
if self.highway==2: # [][32batch, 32hidden]
stream_answer_hidden_max = torch.cat( stream_answer_hidden_arr, 1)
#print("stream_answer_hidden_max.size() : ", stream_answer_hidden_max.size()) # (32,32)
#ans = self.stream_answer_to_output( )
ans = self.stream_answer_to_output( stream_answer_hidden ) # Temp
else:
ans = self.stream_answer_to_output( stream_answer_hidden )
if self.debug:
self.stream_logits = stream_logits
self.ent_similarities = ent_similarities
self.ent_weights_arr = ent_weights_arr
self.stream_values = stream_values
self.ans_logits = ans
return F.log_softmax(ans) # log_softmax is what's expected
|
|
# -*- coding: utf-8-*-
"""
Author: Marco Dinacci <dev@dinointeractive.com>
License: BSD
World Editor
TODO
* to deal with entities I need an Entity Manager (see also multifiles).
It's ok to have multiple references,
but there must be only a single place where they are managed.
* (re)code everything to use an EventManager (see taskMgr)
- change cell specific settings:
- model (material, lights, texture)
- color (optional)
- create multiple surfaces in ODE and bind them to cells ?
* implement scene save/load
- better ball physics (fix the fact that it never stops)
- better camera for the ball, must have constant X position and
constant Y distance
- new cell models to implement elevation
- curves :O
- fix the logger
* Configuration manager, all the parameters must be read from disk
- use egg-qtess to polygonize a NURBS surface
- I need a python shell inside the editor !
- use Panda3D multifiles to store entities !
- search useless imports and remove them
* implement messaging system
- optimize scene. Have four nodes: staticobjs, actors, sky, evrthng with alpha
"""
# useful for debugging
from mdlib.decorator import traceMethod, accepts, trace, dumpArgs
# load configuration
# TODO the ConfigurationManager should take care of this
from pandac.PandaModules import loadPrcFile, ConfigVariableString, ConfigVariableBool
loadPrcFile("../res/Config.prc")
loadPrcFile("../res/Editor.prc")
# panda 3d stuff
from direct.showbase.ShowBase import ShowBase
from direct.showbase.DirectObject import DirectObject
from direct.directtools.DirectGeometry import LineNodePath
from pandac.PandaModules import Point3, Vec4, Vec3, NodePath, Quat
from pandac.PandaModules import LightAttrib, AmbientLight, DirectionalLight
from pandac.PandaModules import EggData, Filename, BamFile
from pandac.PandaModules import WindowProperties
# collision to pick entities with mouse
from pandac.PandaModules import CollisionNode, CollisionHandlerQueue, \
CollisionTraverser, CollisionRay, GeomNode
# panda utilities and actors
from mdlib.panda import eventCallback, inputCallback, guiCallback, MouseWatcher
from mdlib.panda.core import *
from mdlib.panda.camera import *
from mdlib.panda.entity import *
from mdlib.panda.input import *
from mdlib.panda.data import *
from mdlib.panda.entity import *
from mdlib.panda.physics import POM
from mdlib.panda import event
# logging
from mdlib.log import ConsoleLogger, DEBUG,WARNING
logger = ConsoleLogger("editor", DEBUG)
# for debugging
import echo
# editor imports
from PyQt4.QtGui import QApplication
from gui.qt import EditorGUI
from gui import GUIPresenter
#echo.echo_class(EditorGUI)
import cPickle, time
from sys import exit
SCENE_FORMAT_VERSION = "0.1.1"
class EditorScene(AbstractScene):
def __init__(self):
super(EditorScene, self).__init__()
self._camera = None
# create initial lights
self._setupLights()
def _setupLights(self):
#Create some lights and add them to the scene. By setting the lights on
#render they affect the entire scene
#Check out the lighting tutorial for more information on lights
lAttrib = LightAttrib.makeAllOff()
ambientLight = AmbientLight( "ambientLight" )
ambientLight.setColor( Vec4(.4, .4, .35, 1) )
lAttrib = lAttrib.addLight( ambientLight )
directionalLight = DirectionalLight( "directionalLight" )
directionalLight.setDirection( Vec3( 0, 8, -2.5 ) )
directionalLight.setColor( Vec4( 0.9, 0.8, 0.9, 1 ) )
lAttrib = lAttrib.addLight( directionalLight )
self._rootNode.attachNewNode( directionalLight.upcastToPandaNode() )
self._rootNode.attachNewNode( ambientLight.upcastToPandaNode() )
self._rootNode.node().setAttrib( lAttrib )
"""
def deleteEntityFromNodePath(self, nodePath):
# FIXME must remove entity IF it is an entity (maybe just a tree)
nodePath.hideBounds()
nodePath.removeNode()
"""
camera = property(fget=lambda self: self._camera,
fset=lambda self,cam: setattr(self, '_camera', cam))
class EditorView(AbstractView):
_scene = EditorScene()
def __init__(self, inputMgr):
super(EditorView, self).__init__(inputMgr)
def enable(self):
# reenable camera controller
self.camera.setActive(True)
self.scene.camera = self.camera
self._inputMgr.switchSchemeTo(self.INPUT_SCHEME)
def disable(self):
# disable camera controller
self.camera.setActive(False)
self._inputMgr.switchSchemeTo(BASE_SCHEME)
def readInput(self, task):
self._inputMgr.update()
return task.cont
def update(self, task):
# entity position is updated automatically by the physic manager by
# setting parameters for position and rotation in params.
# TODO
# update GUI
self.scene.camera.update()
self.scene.update()
return task.cont
def render(self, task):
self.scene.render()
return task.cont
def setSceneRootNode(self, node):
self.scene.setRootNodeParent(node)
def addToScene(self, entity):
self._scene.addEntity(entity)
def _registerToCommands(self):
self._inputMgr.bindEvent("escape", event.REQUEST_SHUTDOWN,
scheme="base")
self._inputMgr.bindEvent("1", event.SWITCH_VIEW, ["roaming"],
scheme="base")
self._inputMgr.bindEvent("2", event.SWITCH_VIEW, ["editing"],
scheme="base")
self._inputMgr.bindEvent("3", event.SWITCH_VIEW, ["simulating"],
scheme="base")
self._inputMgr.bindEvent("4", event.SWITCH_VIEW, ["debugging"],
scheme="base")
self._inputMgr.bindCallback("0", self.scene.camera.lookAtOrigin)
def _subscribeToEvents(self):
pass
scene = property(fget = lambda self: self._scene, fset=None)
class RoamingView(EditorView):
"""
This mode allows the user to 'roam' freely inside the world.
"""
INPUT_SCHEME = "roaming"
def _setupCamera(self):
self.camera = RoamingCamera(self._inputMgr)
self.camera.setPos(0,-40,15)
self.camera.lookAt(0,0,0)
self.scene.camera = self.camera
def _registerToCommands(self):
super(RoamingView, self)._registerToCommands()
self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)
def enable(self):
self.camera.showCursor(False)
super(RoamingView, self).enable()
class EditingView(EditorView):
"""
The editing view is the most sophisticated view.
It transforms the editor into a world editor, allowing objects
to be inserted and positioned.
Messages sent here are received by the GUI
Accepted inputs:
- space -> add a new row
- mouse1 press -> select a node
"""
INPUT_SCHEME = "editing"
def __init__(self, inputMgr):
super(EditingView, self).__init__(inputMgr)
self._setupCollisionDetection()
self._selectedObj = None
def getSelectedEntity(self):
if self._selectedObj is not None:
entity = self.scene.getEntityByID(int(self._selectedObj.getNetTag("UID")))
return entity
def deleteFromScene(self, entity):
self.scene.deleteEntity(entity)
def deleteSelectedObject(self):
if self._selectedObj is not None:
logger.debug("Deleting selected entity: %s " % self._selectedObj)
self.scene.deleteEntityByID(int(self._selectedObj.getNetTag("UID")))
#self.scene.deleteEntityFromNodePath(self._selectedObj) <-- delete single mesh
self._selectedObj = None
else:
logger.info("Nothing selected, can't delete")
def enable(self):
self.camera.showCursor(True)
super(EditingView, self).enable()
def disable(self):
super(EditingView, self).disable()
@inputCallback
def _onMousePress(self):
mousePos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(self.scene.camera, mousePos.getX(),
mousePos.getY())
self.picker.traverse(self.scene.getRootNode())
entries = self.pq.getNumEntries()
logger.debug("Ray collided with %d entries" % entries)
if entries > 0:
if self._selectedObj is not None:
self._selectedObj.hideBounds()
self.pq.sortEntries()
for i in range(0, entries):
pickedObject = self.pq.getEntry(i).getIntoNodePath()
logger.debug("Picked object #%d = %s" % (i, pickedObject))
# highlight the closest selected object
pickedObject = self.pq.getEntry(0).getIntoNodePath()
pickedObject.showTightBounds()
# set it current and send a msg that a new entity has been selected
#self._selectedObj = self.scene.getEntityByID(pickedObject.getNetTag("ID"))
self._selectedObj = pickedObject
entity = self.scene.getEntityByID(pickedObject.getNetTag("UID"))
logger.debug("Set selected object to: %s" % entity)
messenger.send(event.SELECT_ENTITY, [entity])
else:
logger.debug("No collisions at: %s" % mousePos)
def _setupCollisionDetection(self):
self.picker = CollisionTraverser()
self.pq = CollisionHandlerQueue()
self.pickerNode = CollisionNode("entityPickRay")
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.picker.addCollider(self.pickerNP, self.pq)
def _setupCamera(self):
self.camera = FixedCamera(self._inputMgr)
self.camera.setPos(0,-40,15)
self.camera.lookAt(0,0,0)
self.scene.camera = self.camera
def _registerToCommands(self):
super(EditingView, self)._registerToCommands()
self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)
self._inputMgr.bindEvent("space", event.NEW_ROW)
self._inputMgr.bindCallback("mouse1", self._onMousePress)
class SimulatingView(EditorView):
"""
This mode simulates the game.
Accepted inputs:
- i -> move the ball forward
- j -> move the ball left
- k -> move the ball back
- l -> move the ball right
"""
INPUT_SCHEME = "simulating"
def __init__(self, inputMgr):
super(SimulatingView, self).__init__(inputMgr)
self._isPlayerSet = False
def _setupCamera(self):
self.camera = TheBallCamera(self._inputMgr)
self.camera.setPos(0,-40,15)
self.camera.lookAt(0,0,0)
self.scene.camera = self.camera
def _registerToCommands(self):
super(SimulatingView, self)._registerToCommands()
self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)
self._inputMgr.bindEvent("i", event.MOVE_PLAYER, [0,1.5,0])
self._inputMgr.bindEvent("i-up", event.MOVE_PLAYER, [0,0,0])
self._inputMgr.bindEvent("j", event.MOVE_PLAYER, [-2,0,0])
self._inputMgr.bindEvent("j-up", event.MOVE_PLAYER, [0,0,0])
self._inputMgr.bindEvent("k", event.MOVE_PLAYER, [0,-0.5,0])
self._inputMgr.bindEvent("k-up", event.MOVE_PLAYER, [0,0,0])
self._inputMgr.bindEvent("l", event.MOVE_PLAYER, [2,0,0])
self._inputMgr.bindEvent("l-up", event.MOVE_PLAYER, [0,0,0])
def setPlayer(self, actorID):
entity = self.scene.getEntityByID(actorID)
self.camera.setTarget(entity.render.nodepath)
self._isPlayerSet = True
logger.debug("Player set to: %s" % entity)
def enable(self):
self.camera.showCursor(False)
super(SimulatingView, self).enable()
# TODO create actors and create geometry from here using the physic manager
class EditorLogic(AbstractLogic):
"""
The editor allows games to be constructed by managing 3D objects;
it also allows the game to be debugged and tested.
TODO create a SceneDelegate object to deal with scene stuff
"""
def __init__(self, view):
super(EditorLogic, self).__init__(view)
# copied objects are stored here.
self._copyMemory = []
self._sceneFile = '/home/mdinacci/Work/MD/rtw/editor/res/scenes/editor_start_1.rtw'
#self.loadScene(self._sceneFile)
self._createInitialScene()
def getSavedScene(self):
return self._sceneFile
def _createInitialScene(self):
# create some background entities to populate a bit the space
self.view.addToScene(GOM.createEntity(environment_params.copy()))
#self._track = GOM.createEntity(track_params.copy())
#self.view.addToScene(self._track)
# create player
self._player = GOM.createEntity(golfball.copy())
self.view.addToScene(self._player)
def _subscribeToEvents(self):
self.listener = SafeDirectObject()
self.listener.accept(event.NEW_ROW, self.addRow)
self.listener.accept(event.MOVE_PLAYER, self._movePlayer)
@eventCallback
def _movePlayer(self, xForce, yForce, zForce):
logger.info("Moving player with vector force: %d,%d,%d"
% (xForce, yForce, zForce))
#entity = self.view.scene.getEntityByID(self._player.UID)
# FIXME refactor
path = getPropertyPath("xForce")
self.view.scene.editEntity(self._player.UID, path, xForce)
path = getPropertyPath("yForce")
self.view.scene.editEntity(self._player.UID, path, yForce)
path = getPropertyPath("zForce")
self.view.scene.editEntity(self._player.UID, path, zForce)
@eventCallback
def addRow(self):
for entity in self._track.createRow():
self.view.addToScene(entity)
@guiCallback
def loadScene(self, sceneFile):
fh = open(sceneFile, "rb")
# load function
load = lambda: cPickle.load(fh)
version = load()
entitiesNum = load()
entities = [self.view.addToScene(GOM.createEntity(load()))
for idx in range(0, entitiesNum)]
# set player and track
self._player = self.view.scene.getEntityByName("Ball")
self._track = self.view.scene.getEntityByName("Track")
@guiCallback
def hasSavedScene(self):
return self._sceneFile != ''
@guiCallback
def saveScene(self, sceneFile):
# TODO save to a multifile
fh = open(sceneFile, "wb")
# save function
dump = lambda x: cPickle.dump(x, fh, -1)
# get the serialised data from the scene
entities = self.view.scene.serialise()
# store version
dump(SCENE_FORMAT_VERSION)
# store the number of entities, useful when unpickling
dump(len(entities))
# save entities
[dump(entity) for entity in entities]
fh.close()
logger.info("Scene file saved to %s" % sceneFile )
self._sceneFile = sceneFile
@guiCallback
def addEntityFromFile(self, fileName):
pass
@guiCallback
def deleteSelectedObject(self):
entity = self.view.getSelectedEntity()
if entity.has_key("physics") and entity.physics.has_key("geom"):
POM.removeGeometryTo(entity)
self.view.deleteFromScene(entity)
@guiCallback
def copySelectedObject(self):
entity = self.view.getSelectedEntity()
if entity is not None:
self._copyMemory.append(entity)
@guiCallback
def editObject(self, eid, property, newValue):
self.view.scene.editEntity(eid, property, newValue)
@guiCallback
def pasteSelectedObject(self):
if len(self._copyMemory) > 0:
params = self._copyMemory.pop().serialise()
# slightly shift the pasted object with respect to the original
params.position.x += 2
params.position.z += 2
# I need to create a new ID for the pasted entity, I can't rely
# on GOM because it would reuse the existing one, therefore creating
# an entity with the same ID as the copied one.
newUid = GOM.generateUID()
params._uid = newUid
self.view.addToScene(GOM.createEntity(params))
def showPlayer(self):
logger.debug("Showing player")
if hasattr(self._view,"setPlayer"):
self._view.setPlayer(self._player.UID)
def hidePlayer(self):
""" Hide the ball as we need it only in simulating mode """
logger.debug("Hiding player")
self._view.scene.hideEntityByID(self._player.UID)
def update(self, task):
# TODO
# update game state
# run ai behavior
# trigger new events
# run physics simulation
POM.update(self.view.scene)
# update particle systems
# moves animation forward for visible characters
# update player's position and cameras
return task.cont
class EditorApplication(AbstractApplication):
dta = 0
def __init__(self, qtApp):
super(EditorApplication, self).__init__()
self._isRunning = True
self._isPaused = False
self._qtApp = qtApp
def step(self):
taskMgr.step()
#self.dta += globalClock.getDt()
#while self.dta > self.stepSize:
# self.dta -= self.stepSize
# taskMgr.step()
#time.sleep(0.0001)
def shutdown(self):
logger.info("Shutdown requested")
self._isRunning = False
def restore(self):
self._isRunning = True
self.dta = 0
taskMgr.step()
self.run()
def run(self):
"""
Main loop of the application
First step, create the processes that will be constantly updated
Second, run them.
Third, destroy them
Now the loop is handled by QT, so all the tasks are executed when the
QT decides to execute the idle function I set up.
"""
logger.debug("Starting application")
# Create processes
self._createProcesses()
self._gui.show()
self._qtApp.exec_()
def _createProcesses(self):
        # Start the processes in the correct order:
        # - logic update
        #   - physics update (handled by logic)
        # - view update
        #   - input update (handled by view)
        #   - scene update (handled by view)
        #   - gui update (handled by view)
        # - view render
        #   - scene render (handled by view)
        #   - gui render (handled by view)
logger.debug("Creating processes")
taskMgr.add(self._view.readInput, "read-input")
taskMgr.add(self._logic.update, "logic-update")
taskMgr.add(self._view.update, "view-update")
taskMgr.add(self._view.render, "view-render")
#taskMgr.add(self._mouseWatcher.update, "mw-update")
def _shutDownProcesses(self):
taskMgr.stop()
self.nbase.userExit()
@eventCallback
def _switchView(self, view):
if view in self._views.keys():
# don't switch to the same view
if self._view != self._views[view]:
logger.debug("Switching to %s view" % view)
self._view.disable()
self._view = self._views[view]
self._logic.view = self._view
self._view.enable()
if view is "simulating":
self._logic.showPlayer()
else:
logger.error("View %s doesn't exists" % view)
def _subscribeToEvents(self):
self.listener = SafeDirectObject()
self.listener.accept(event.SWITCH_VIEW, self._switchView)
self.listener.accept(event.REQUEST_SHUTDOWN, self.shutdown)
def _createLogicAndView(self):
# TODO override ShowBase in order to use only what we really need
self.nbase = ShowBase()
self.nbase.windowType = "onscreen"
#taskMgr.popupControls()
self._mouseWatcher = MouseWatcher(self.nbase)
self._guiPresenter = GUIPresenter()
self._guiPresenter.setIdleCallback(self.step)
self._gui = EditorGUI(self._guiPresenter)
winHandle = self._gui.getHandle()
wp = WindowProperties().getDefault()
wp.setOrigin(0,0)
wp.setSize(self._gui.width(), self._gui.height())
wp.setParentWindow(int(winHandle)) # must be an int or it won't work on windows
self.nbase.openDefaultWindow(startDirect=False, props=wp)
self._gui.setPandaWindow(self.nbase.win)
inp = InputManager(self.nbase)
self._views = {"editing": EditingView(inp),
"roaming": RoamingView(inp),
"simulating": SimulatingView(inp)}
self._view = self._views["roaming"]
self._view.enable()
self._view.setSceneRootNode(self.nbase.render)
self._logic = EditorLogic(self._view)
# don't change the order
#self._guiPresenter.setPandaController(self._views["editing"])
self._guiPresenter.setPandaController(self._logic)
self._guiPresenter.setView(self._gui)
# FIXME
self._guiPresenter.setModel(self._views["editing"].scene)
# set a fixed frame rate
from pandac.PandaModules import ClockObject
FPS = 40
globalClock = ClockObject.getGlobalClock()
globalClock.setMode(ClockObject.MLimited)
globalClock.setFrameRate(FPS)
if __name__ == "__main__":
edApp = EditorApplication(QApplication(['']))
edApp.run()
|
|
"""
neural network stuff, intended to be used with Lasagne
"""
import numpy as np
import theano as th
import theano.tensor as T
import lasagne
from lasagne.layers import dnn
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# T.nnet.relu has some stability issues, this is better
def relu(x):
return T.maximum(x, 0)
def lrelu(x, a=0.2):
return T.maximum(x, a*x)
def centered_softplus(x):
return T.nnet.softplus(x) - np.cast[th.config.floatX](np.log(2.))
def log_sum_exp(x, axis=1):
m = T.max(x, axis=axis)
return m+T.log(T.sum(T.exp(x-m.dimshuffle(0,'x')), axis=axis))
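# Hedged usage note (not part of the original module): log_sum_exp above uses
# the standard max-subtraction trick so exp() never overflows. A minimal
# numpy-only check of the same identity, on made-up inputs:
def _log_sum_exp_numpy_check():
    x = np.array([[1000., 1001.], [1., 2.]])
    m = x.max(axis=1)
    stable = m + np.log(np.exp(x - m[:, None]).sum(axis=1))
    # a naive np.log(np.exp(x).sum(axis=1)) overflows for the first row,
    # while the stabilized form stays finite (~1001.313 and ~2.313)
    return stable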
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
updates = []
grads = T.grad(cost, params)
t = th.shared(np.cast[th.config.floatX](1.))
for p, g in zip(params, grads):
v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
v_t = mom1*v + (1. - mom1)*g
mg_t = mom2*mg + (1. - mom2)*T.square(g)
v_hat = v_t / (1. - mom1 ** t)
mg_hat = mg_t / (1. - mom2 ** t)
g_t = v_hat / T.sqrt(mg_hat + 1e-8)
p_t = p - lr * g_t
updates.append((v, v_t))
updates.append((mg, mg_t))
updates.append((p, p_t))
    # the shared step counter is updated once, outside the per-parameter loop
    updates.append((t, t+1))
return updates
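# Hedged usage sketch (not part of the original module): how adam_updates()
# is typically wired into a compiled Theano training function. The symbols
# below (x_sym, w, cost) are illustrative assumptions, not names taken from
# the original code.
def _adam_usage_sketch():
    x_sym = T.matrix('x')
    w = th.shared(np.zeros((5, 1), dtype=th.config.floatX), name='w')
    cost = T.mean(T.square(T.dot(x_sym, w) - 1.))  # toy least-squares objective
    updates = adam_updates([w], cost, lr=1e-3)     # (shared, new_value) pairs
    return th.function([x_sym], cost, updates=updates)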
class WeightNormLayer(lasagne.layers.Layer):
def __init__(self, incoming, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.),
W=lasagne.init.Normal(0.05), train_g=False, init_stdv=1., nonlinearity=relu, **kwargs):
super(WeightNormLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = nonlinearity
self.init_stdv = init_stdv
k = self.input_shape[1]
if b is not None:
self.b = self.add_param(b, (k,), name="b", regularizable=False)
if g is not None:
self.g = self.add_param(g, (k,), name="g", regularizable=False, trainable=train_g)
if len(self.input_shape)==4:
self.axes_to_sum = (0,2,3)
self.dimshuffle_args = ['x',0,'x','x']
else:
self.axes_to_sum = 0
self.dimshuffle_args = ['x',0]
# scale weights in layer below
incoming.W_param = incoming.W
#incoming.W_param.set_value(W.sample(incoming.W_param.get_value().shape))
if incoming.W_param.ndim==4:
if isinstance(incoming, Deconv2DLayer):
W_axes_to_sum = (0,2,3)
W_dimshuffle_args = ['x',0,'x','x']
else:
W_axes_to_sum = (1,2,3)
W_dimshuffle_args = [0,'x','x','x']
else:
W_axes_to_sum = 0
W_dimshuffle_args = ['x',0]
if g is not None:
incoming.W = incoming.W_param * (self.g/T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)
else:
incoming.W = incoming.W_param / T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum,keepdims=True))
def get_output_for(self, input, init=False, **kwargs):
if init:
m = T.mean(input, self.axes_to_sum)
input -= m.dimshuffle(*self.dimshuffle_args)
inv_stdv = self.init_stdv/T.sqrt(T.mean(T.square(input), self.axes_to_sum))
input *= inv_stdv.dimshuffle(*self.dimshuffle_args)
self.init_updates = [(self.b, -m*inv_stdv), (self.g, self.g*inv_stdv)]
elif hasattr(self,'b'):
input += self.b.dimshuffle(*self.dimshuffle_args)
return self.nonlinearity(input)
def weight_norm(layer, **kwargs):
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = lasagne.nonlinearities.identity
if hasattr(layer, 'b'):
del layer.params[layer.b]
layer.b = None
return WeightNormLayer(layer, nonlinearity=nonlinearity, **kwargs)
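# Hedged usage sketch (not part of the original module): weight_norm() above
# strips the wrapped layer's bias and nonlinearity and re-applies them inside
# a WeightNormLayer that rescales the weights to unit norm. The layer sizes
# are illustrative assumptions.
def _weight_norm_usage_sketch():
    l_in = lasagne.layers.InputLayer(shape=(None, 784))
    l_hid = lasagne.layers.DenseLayer(l_in, num_units=500, nonlinearity=relu)
    return weight_norm(l_hid, train_g=True)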
class Deconv2DLayer(lasagne.layers.Layer):
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
super(Deconv2DLayer, self).__init__(incoming, **kwargs)
self.target_shape = target_shape
self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
self.filter_size = lasagne.layers.dnn.as_tuple(filter_size, 2)
self.stride = lasagne.layers.dnn.as_tuple(stride, 2)
self.target_shape = target_shape
self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
self.W = self.add_param(W, self.W_shape, name="W")
if b is not None:
self.b = self.add_param(b, (target_shape[1],), name="b")
else:
self.b = None
def get_output_for(self, input, **kwargs):
op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=self.target_shape, kshp=self.W_shape, subsample=self.stride, border_mode='half')
activation = op(self.W, input, self.target_shape[2:])
if self.b is not None:
activation += self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
def get_output_shape_for(self, input_shape):
return self.target_shape
# minibatch discrimination layer
class MinibatchLayer(lasagne.layers.Layer):
def __init__(self, incoming, num_kernels, dim_per_kernel=5, theta=lasagne.init.Normal(0.05),
log_weight_scale=lasagne.init.Constant(0.), b=lasagne.init.Constant(-1.), **kwargs):
super(MinibatchLayer, self).__init__(incoming, **kwargs)
self.num_kernels = num_kernels
num_inputs = int(np.prod(self.input_shape[1:]))
self.theta = self.add_param(theta, (num_inputs, num_kernels, dim_per_kernel), name="theta")
self.log_weight_scale = self.add_param(log_weight_scale, (num_kernels, dim_per_kernel), name="log_weight_scale")
self.W = self.theta * (T.exp(self.log_weight_scale)/T.sqrt(T.sum(T.square(self.theta),axis=0))).dimshuffle('x',0,1)
self.b = self.add_param(b, (num_kernels,), name="b")
def get_output_shape_for(self, input_shape):
return (input_shape[0], np.prod(input_shape[1:])+self.num_kernels)
def get_output_for(self, input, init=False, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.flatten(2)
activation = T.tensordot(input, self.W, [[1], [0]])
abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
+ 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))
if init:
mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
f = T.sum(T.exp(-abs_dif),axis=2)
if init:
mf = T.mean(f,axis=0)
f -= mf.dimshuffle('x',0)
self.init_updates.append((self.b, -mf))
else:
f += self.b.dimshuffle('x',0)
return T.concatenate([input, f], axis=1)
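# Hedged usage sketch (not part of the original module): MinibatchLayer above
# implements minibatch discrimination; for every sample it appends
# num_kernels closeness statistics computed against the other samples in the
# batch, which helps a GAN discriminator detect mode collapse. The sizes are
# illustrative assumptions.
def _minibatch_discrimination_sketch():
    l_feat = lasagne.layers.InputLayer(shape=(None, 256))
    l_mb = MinibatchLayer(l_feat, num_kernels=100)
    # per get_output_shape_for(): output shape is (batch_size, 256 + 100)
    return l_mb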
class BatchNormLayer(lasagne.layers.Layer):
def __init__(self, incoming, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.), nonlinearity=relu, **kwargs):
super(BatchNormLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = nonlinearity
k = self.input_shape[1]
if b is not None:
self.b = self.add_param(b, (k,), name="b", regularizable=False)
if g is not None:
self.g = self.add_param(g, (k,), name="g", regularizable=False)
self.avg_batch_mean = self.add_param(lasagne.init.Constant(0.), (k,), name="avg_batch_mean", regularizable=False, trainable=False)
self.avg_batch_var = self.add_param(lasagne.init.Constant(1.), (k,), name="avg_batch_var", regularizable=False, trainable=False)
if len(self.input_shape)==4:
self.axes_to_sum = (0,2,3)
self.dimshuffle_args = ['x',0,'x','x']
else:
self.axes_to_sum = 0
self.dimshuffle_args = ['x',0]
def get_output_for(self, input, deterministic=False, **kwargs):
if deterministic:
norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
else:
batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
batch_stdv = T.sqrt(1e-6 + batch_var)
norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
# BN updates
new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1),th.config.floatX)*batch_var
self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
if hasattr(self, 'g'):
activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
else:
activation = norm_features
if hasattr(self, 'b'):
activation += self.b.dimshuffle(*self.dimshuffle_args)
return self.nonlinearity(activation)
def batch_norm(layer, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.), **kwargs):
"""
adapted from https://gist.github.com/f0k/f1a6bd3c8585c400c190
"""
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = lasagne.nonlinearities.identity
else:
nonlinearity = lasagne.nonlinearities.identity
if hasattr(layer, 'b'):
del layer.params[layer.b]
layer.b = None
return BatchNormLayer(layer, b, g, nonlinearity=nonlinearity, **kwargs)
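# Hedged usage sketch (not part of the original module): BatchNormLayer above
# does not register its running-average updates automatically; after building
# the graph, callers are expected to collect each layer's bn_updates and pass
# them to theano.function. The layer sizes are illustrative assumptions.
def _batch_norm_usage_sketch():
    l_in = lasagne.layers.InputLayer(shape=(None, 3, 32, 32))
    l_conv = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=3,
                                        pad=1, nonlinearity=relu)
    l_bn = batch_norm(l_conv)  # relu is moved from the conv into the BN layer
    out = lasagne.layers.get_output(l_bn, deterministic=False)
    bn_updates = getattr(l_bn, 'bn_updates', [])
    return out, bn_updates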
class GaussianNoiseLayer(lasagne.layers.Layer):
def __init__(self, incoming, sigma=0.1, **kwargs):
super(GaussianNoiseLayer, self).__init__(incoming, **kwargs)
self._srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))
self.sigma = sigma
def get_output_for(self, input, deterministic=False, use_last_noise=False, **kwargs):
if deterministic or self.sigma == 0:
return input
else:
if not use_last_noise:
self.noise = self._srng.normal(input.shape, avg=0.0, std=self.sigma)
return input + self.noise
# /////////// older code used for MNIST ////////////
# weight normalization
def l2normalize(layer, train_scale=True):
W_param = layer.W
s = W_param.get_value().shape
if len(s)==4:
axes_to_sum = (1,2,3)
dimshuffle_args = [0,'x','x','x']
k = s[0]
else:
axes_to_sum = 0
dimshuffle_args = ['x',0]
k = s[1]
layer.W_scale = layer.add_param(lasagne.init.Constant(1.),
(k,), name="W_scale", trainable=train_scale, regularizable=False)
layer.W = W_param * (layer.W_scale/T.sqrt(1e-6 + T.sum(T.square(W_param),axis=axes_to_sum))).dimshuffle(*dimshuffle_args)
return layer
# fully connected layer with weight normalization
class DenseLayer(lasagne.layers.Layer):
def __init__(self, incoming, num_units, theta=lasagne.init.Normal(0.1), b=lasagne.init.Constant(0.),
weight_scale=lasagne.init.Constant(1.), train_scale=False, nonlinearity=relu, **kwargs):
super(DenseLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
self.num_units = num_units
num_inputs = int(np.prod(self.input_shape[1:]))
self.theta = self.add_param(theta, (num_inputs, num_units), name="theta")
self.weight_scale = self.add_param(weight_scale, (num_units,), name="weight_scale", trainable=train_scale)
self.W = self.theta * (self.weight_scale/T.sqrt(T.sum(T.square(self.theta),axis=0))).dimshuffle('x',0)
self.b = self.add_param(b, (num_units,), name="b")
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units)
def get_output_for(self, input, init=False, deterministic=False, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.flatten(2)
activation = T.dot(input, self.W)
if init:
ma = T.mean(activation, axis=0)
activation -= ma.dimshuffle('x',0)
stdv = T.sqrt(T.mean(T.square(activation),axis=0))
activation /= stdv.dimshuffle('x',0)
self.init_updates = [(self.weight_scale, self.weight_scale/stdv), (self.b, -ma/stdv)]
else:
activation += self.b.dimshuffle('x', 0)
return self.nonlinearity(activation)
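# Hedged usage sketch (not part of the original module): the layers in this
# module that record init_updates (WeightNormLayer, MinibatchLayer, the
# DenseLayer above) support a data-dependent initialization pass: build the
# graph once with init=True, collect the recorded updates, and apply them on
# a single batch of real data. l_out and x are illustrative assumptions.
def _data_dependent_init_sketch(l_out):
    x = T.matrix('x')
    init_out = lasagne.layers.get_output(l_out, x, init=True)
    init_updates = [u for l in lasagne.layers.get_all_layers(l_out)
                    for u in getattr(l, 'init_updates', [])]
    return th.function([x], init_out, updates=init_updates)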
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from six import StringIO
from skbio.stats.distance import DistanceMatrix
from skbio.tree import TreeNode
def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
""" Apply neighbor joining for phylogenetic reconstruction.
Parameters
----------
dm : skbio.DistanceMatrix
Input distance matrix containing distances between OTUs.
disallow_negative_branch_length : bool, optional
Neighbor joining can result in negative branch lengths, which don't
make sense in an evolutionary context. If `True`, negative branch
lengths will be returned as zero, a common strategy for handling this
issue that was proposed by the original developers of the algorithm.
result_constructor : function, optional
Function to apply to construct the result object. This must take a
newick-formatted string as input. The result of applying this function
to a newick-formatted string will be returned from this function. This
defaults to ``lambda x: TreeNode.read(StringIO(x), format='newick')``.
Returns
-------
TreeNode
By default, the result object is a `TreeNode`, though this can be
overridden by passing `result_constructor`.
See Also
--------
TreeNode.root_at_midpoint
Notes
-----
Neighbor joining was initially described in Saitou and Nei (1987) [1]_. The
example presented here is derived from the Wikipedia page on neighbor
joining [2]_. The Phylip manual also describes the method [3]_ and Phylip
itself provides an implementation which is useful for comparison.
Neighbor joining, by definition, creates unrooted trees. One strategy for
rooting the resulting trees is midpoint rooting, which is accessible as
``TreeNode.root_at_midpoint``.
References
----------
.. [1] Saitou N, and Nei M. (1987) "The neighbor-joining method: a new
method for reconstructing phylogenetic trees." Molecular Biology and
Evolution. PMID: 3447015.
.. [2] http://en.wikipedia.org/wiki/Neighbour_joining
.. [3] http://evolution.genetics.washington.edu/phylip/doc/neighbor.html
Examples
--------
Define a new distance matrix object describing the distances between five
OTUs: a, b, c, d, and e.
>>> from skbio import DistanceMatrix
>>> from skbio.tree import nj
>>> data = [[0, 5, 9, 9, 8],
... [5, 0, 10, 10, 9],
... [9, 10, 0, 8, 7],
... [9, 10, 8, 0, 3],
... [8, 9, 7, 3, 0]]
>>> ids = list('abcde')
>>> dm = DistanceMatrix(data, ids)
    Construct the neighbor joining tree representing the relationship between
those OTUs. This is returned as a TreeNode object.
>>> tree = nj(dm)
>>> print(tree.ascii_art())
/-d
|
| /-c
|---------|
---------| | /-b
| \--------|
| \-a
|
\-e
Again, construct the neighbor joining tree, but instead return the newick
string representing the tree, rather than the TreeNode object. (Note that
in this example the string output is truncated when printed to facilitate
rendering.)
>>> newick_str = nj(dm, result_constructor=str)
>>> print(newick_str[:55], "...")
(d:2.000000, (c:4.000000, (b:3.000000, a:2.000000):3.00 ...
"""
if dm.shape[0] < 3:
raise ValueError(
"Distance matrix must be at least 3x3 to "
"generate a neighbor joining tree.")
if result_constructor is None:
def result_constructor(x):
return TreeNode.read(StringIO(x), format='newick')
# initialize variables
node_definition = None
# while there are still more than three distances in the distance matrix,
# join neighboring nodes.
while(dm.shape[0] > 3):
# compute the Q matrix
q = _compute_q(dm)
# identify the pair of nodes that have the lowest Q value. if multiple
# pairs have equally low Q values, the first pair identified (closest
# to the top-left of the matrix) will be chosen. these will be joined
# in the current node.
idx1, idx2 = _lowest_index(q)
pair_member_1 = dm.ids[idx1]
pair_member_2 = dm.ids[idx2]
# determine the distance of each node to the new node connecting them.
pair_member_1_len, pair_member_2_len = _pair_members_to_new_node(
dm, idx1, idx2, disallow_negative_branch_length)
# define the new node in newick style
node_definition = "(%s:%f, %s:%f)" % (pair_member_1,
pair_member_1_len,
pair_member_2,
pair_member_2_len)
# compute the new distance matrix, which will contain distances of all
# other nodes to this new node
dm = _compute_collapsed_dm(
dm, pair_member_1, pair_member_2,
disallow_negative_branch_length=disallow_negative_branch_length,
new_node_id=node_definition)
# When there are three distances left in the distance matrix, we have a
# fully defined tree. The last node is internal, and its distances are
# defined by these last three values.
# First determine the distance between the last two nodes to be joined in
# a pair...
pair_member_1 = dm.ids[1]
pair_member_2 = dm.ids[2]
pair_member_1_len, pair_member_2_len = \
_pair_members_to_new_node(dm, pair_member_1, pair_member_2,
disallow_negative_branch_length)
# ...then determine their distance to the other remaining node, but first
    # handle the trivial case where the input dm was only 3 x 3
node_definition = node_definition or dm.ids[0]
internal_len = _otu_to_new_node(
dm, pair_member_1, pair_member_2, node_definition,
disallow_negative_branch_length=disallow_negative_branch_length)
# ...and finally create the newick string describing the whole tree.
newick = "(%s:%f, %s:%f, %s:%f);" % (pair_member_1, pair_member_1_len,
node_definition, internal_len,
pair_member_2, pair_member_2_len)
# package the result as requested by the user and return it.
return result_constructor(newick)
def _compute_q(dm):
"""Compute Q matrix, used to identify the next pair of nodes to join.
"""
q = np.zeros(dm.shape)
n = dm.shape[0]
for i in range(n):
for j in range(i):
q[i, j] = q[j, i] = \
((n - 2) * dm[i, j]) - dm[i].sum() - dm[j].sum()
return DistanceMatrix(q, dm.ids)
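def _compute_q_worked_example():
    """Hedged worked example, not part of the original module.
    For the 5x5 matrix used in the ``nj`` docstring above:
        q(a, b) = (n - 2) * d(a, b) - sum(d(a, *)) - sum(d(b, *))
                = 3 * 5 - 31 - 34
                = -50
    which is the value ``_compute_q`` produces for that pair.
    """
    data = [[0, 5, 9, 9, 8],
            [5, 0, 10, 10, 9],
            [9, 10, 0, 8, 7],
            [9, 10, 8, 0, 3],
            [8, 9, 7, 3, 0]]
    q = _compute_q(DistanceMatrix(data, list('abcde')))
    assert q['a', 'b'] == -50
    return q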
def _compute_collapsed_dm(dm, i, j, disallow_negative_branch_length,
new_node_id):
"""Return the distance matrix resulting from joining ids i and j in a node.
If the input distance matrix has shape ``(n, n)``, the result will have
    shape ``(n-1, n-1)`` as the ids `i` and `j` are collapsed into a single
    new id.
"""
in_n = dm.shape[0]
out_n = in_n - 1
out_ids = [new_node_id]
out_ids.extend([e for e in dm.ids if e not in (i, j)])
result = np.zeros((out_n, out_n))
for idx1, out_id1 in enumerate(out_ids[1:]):
result[0, idx1 + 1] = result[idx1 + 1, 0] = _otu_to_new_node(
dm, i, j, out_id1, disallow_negative_branch_length)
for idx2, out_id2 in enumerate(out_ids[1:idx1+1]):
result[idx1+1, idx2+1] = result[idx2+1, idx1+1] = \
dm[out_id1, out_id2]
return DistanceMatrix(result, out_ids)
def _lowest_index(dm):
"""Return the index of the lowest value in the input distance matrix.
    If there are ties for the lowest value, the index of the top-left-most
    occurrence of that value will be returned.
    This should ultimately be replaced with a new DistanceMatrix object
method (#228).
"""
lowest_value = np.inf
for i in range(dm.shape[0]):
for j in range(i):
curr_index = i, j
curr_value = dm[curr_index]
if curr_value < lowest_value:
lowest_value = curr_value
result = curr_index
return result
def _otu_to_new_node(dm, i, j, k, disallow_negative_branch_length):
"""Return the distance between a new node and some other node.
Parameters
----------
dm : skbio.DistanceMatrix
The input distance matrix.
i, j : str
Identifiers of entries in the distance matrix to be collapsed. These
get collapsed to a new node, internally represented as `u`.
k : str
Identifier of the entry in the distance matrix for which distance to
`u` will be computed.
disallow_negative_branch_length : bool
Neighbor joining can result in negative branch lengths, which don't
make sense in an evolutionary context. If `True`, negative branch
lengths will be returned as zero, a common strategy for handling this
issue that was proposed by the original developers of the algorithm.
"""
k_to_u = 0.5 * (dm[i, k] + dm[j, k] - dm[i, j])
if disallow_negative_branch_length and k_to_u < 0:
k_to_u = 0
return k_to_u
def _pair_members_to_new_node(dm, i, j, disallow_negative_branch_length):
"""Return the distance between a new node and decendants of that new node.
Parameters
----------
dm : skbio.DistanceMatrix
The input distance matrix.
i, j : str
Identifiers of entries in the distance matrix to be collapsed (i.e.,
        the descendants of the new node, which is internally represented as
`u`).
disallow_negative_branch_length : bool
Neighbor joining can result in negative branch lengths, which don't
make sense in an evolutionary context. If `True`, negative branch
lengths will be returned as zero, a common strategy for handling this
issue that was proposed by the original developers of the algorithm.
"""
n = dm.shape[0]
i_to_j = dm[i, j]
i_to_u = (0.5 * i_to_j) + ((dm[i].sum() - dm[j].sum()) / (2 * (n - 2)))
if disallow_negative_branch_length and i_to_u < 0:
i_to_u = 0
j_to_u = i_to_j - i_to_u
if disallow_negative_branch_length and j_to_u < 0:
j_to_u = 0
return i_to_u, j_to_u
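def _pair_members_worked_example():
    """Hedged worked example, not part of the original module.
    For the 5x5 matrix in the ``nj`` docstring, joining the pair (a, b)
    gives branch lengths:
        a_to_u = 0.5 * d(a, b) + (sum(d(a, *)) - sum(d(b, *))) / (2 * (n - 2))
               = 0.5 * 5 + (31 - 34) / 6 = 2.0
        b_to_u = d(a, b) - a_to_u = 3.0
    matching ``a:2.000000`` and ``b:3.000000`` in the newick string shown
    in the ``nj`` docstring.
    """
    data = [[0, 5, 9, 9, 8],
            [5, 0, 10, 10, 9],
            [9, 10, 0, 8, 7],
            [9, 10, 8, 0, 3],
            [8, 9, 7, 3, 0]]
    dm = DistanceMatrix(data, list('abcde'))
    return _pair_members_to_new_node(dm, 'a', 'b', True)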
|
|
#!/usr/bin/env python
import re
from ..debugging import bacpypes_debugging, ModuleLogger
from ..task import OneShotTask
from ..primitivedata import Atomic, Null, BitString, CharacterString, \
Date, Integer, Double, Enumerated, OctetString, Real, Time, Unsigned
from ..basetypes import PropertyIdentifier, DateTime, NameValue, BinaryPV, \
ChannelValue, DoorValue, PriorityValue, PriorityArray
from ..constructeddata import Array, ArrayOf, SequenceOf
from ..errors import ExecutionError
from ..object import Property, ReadableProperty, WritableProperty, OptionalProperty, Object, \
AccessDoorObject, AnalogOutputObject, AnalogValueObject, \
BinaryOutputObject, BinaryValueObject, BitStringValueObject, CharacterStringValueObject, \
DateValueObject, DatePatternValueObject, DateTimePatternValueObject, \
DateTimeValueObject, IntegerValueObject, \
LargeAnalogValueObject, LightingOutputObject, MultiStateOutputObject, \
MultiStateValueObject, OctetStringValueObject, PositiveIntegerValueObject, \
TimeValueObject, TimePatternValueObject, ChannelObject
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# handy reference
ArrayOfPropertyIdentifier = ArrayOf(PropertyIdentifier)
#
# CurrentPropertyList
#
@bacpypes_debugging
class CurrentPropertyList(Property):
def __init__(self):
if _debug: CurrentPropertyList._debug("__init__")
Property.__init__(self, 'propertyList', ArrayOfPropertyIdentifier, default=None, optional=True, mutable=False)
def ReadProperty(self, obj, arrayIndex=None):
if _debug: CurrentPropertyList._debug("ReadProperty %r %r", obj, arrayIndex)
# make a list of the properties that have values
property_list = [k for k, v in obj._values.items()
if v is not None
and k not in ('objectName', 'objectType', 'objectIdentifier', 'propertyList')
]
if _debug: CurrentPropertyList._debug(" - property_list: %r", property_list)
# sort the list so it's stable
property_list.sort()
# asking for the whole thing
if arrayIndex is None:
return ArrayOfPropertyIdentifier(property_list)
# asking for the length
if arrayIndex == 0:
return len(property_list)
# asking for an index
if arrayIndex > len(property_list):
raise ExecutionError(errorClass='property', errorCode='invalidArrayIndex')
return property_list[arrayIndex - 1]
def WriteProperty(self, obj, value, arrayIndex=None, priority=None, direct=False):
raise ExecutionError(errorClass='property', errorCode='writeAccessDenied')
#
# CurrentPropertyListMixIn
#
@bacpypes_debugging
class CurrentPropertyListMixIn(Object):
properties = [
CurrentPropertyList(),
]
#
# Turtle Reference Patterns
#
# character reference patterns
HEX = u"[0-9A-Fa-f]"
PERCENT = u"%" + HEX + HEX
UCHAR = u"[\\]u" + HEX * 4 + "|" + u"[\\]U" + HEX * 8
# character sets
PN_CHARS_BASE = (
u"A-Za-z"
u"\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF"
u"\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF"
# u"\uFDF0-\uFFFD\U00010000-\U000EFFFF" - python2.7 on MacOS doesn't like it
)
PN_CHARS_U = PN_CHARS_BASE + u"_"
PN_CHARS = u"-" + PN_CHARS_U + u"0-9\u00B7\u0300-\u036F\u203F-\u2040"
# patterns
IRIREF = u'[<]([^\u0000-\u0020<>"{}|^`\\]|' + UCHAR + u")*[>]"
PN_PREFIX = u"[" + PN_CHARS_BASE + u"](([." + PN_CHARS + u"])*[" + PN_CHARS + u"])?"
PN_LOCAL_ESC = u"[-\\_~.!$&'()*+,;=/?#@%]"
PLX = u"(" + PERCENT + u"|" + PN_LOCAL_ESC + u")"
# non-prefixed names
PN_LOCAL = (
u"(["
+ PN_CHARS_U
+ u":0-9]|"
+ PLX
+ u")((["
+ PN_CHARS
+ u".:]|"
+ PLX
+ u")*(["
+ PN_CHARS
+ u":]|"
+ PLX
+ u"))?"
)
# namespace prefix declaration
PNAME_NS = u"(" + PN_PREFIX + u")?:"
# prefixed names
PNAME_LN = PNAME_NS + PN_LOCAL
# blank nodes
BLANK_NODE_LABEL = (
u"_:[" + PN_CHARS_U + u"0-9]([" + PN_CHARS + u".]*[" + PN_CHARS + u"])?"
)
# see https://www.w3.org/TR/turtle/#sec-parsing-terms
iriref_re = re.compile(u"^" + IRIREF + u"$", re.UNICODE)
local_name_re = re.compile(u"^" + PN_LOCAL + u"$", re.UNICODE)
namespace_prefix_re = re.compile(u"^" + PNAME_NS + u"$", re.UNICODE)
prefixed_name_re = re.compile(u"^" + PNAME_LN + u"$", re.UNICODE)
blank_node_re = re.compile(u"^" + BLANK_NODE_LABEL + u"$", re.UNICODE)
# see https://tools.ietf.org/html/bcp47#section-2.1 for better syntax
language_tag_re = re.compile(u"^[A-Za-z0-9-]+$", re.UNICODE)
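# Hedged usage sketch (not part of the original module): the compiled
# patterns above classify Turtle-style terms used as tag names; the sample
# terms below are illustrative assumptions.
def _turtle_pattern_sketch():
    assert namespace_prefix_re.match(u"bacnet:")           # prefix declaration
    assert prefixed_name_re.match(u"bacnet:analog-value")  # prefixed name
    assert local_name_re.match(u"room-temp_1")             # bare local name
    assert blank_node_re.match(u"_:b1")                    # blank node label
    return True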
class IRI:
# regex from RFC 3986
_e = r"^(?:([^:/?#]+):)?(?://([^/?#]*))?([^?#]*)(?:\?([^#]*))?(?:#(.*))?"
_p = re.compile(_e)
_default_ports = (("http", ":80"), ("https", ":443"))
def __init__(self, iri=None):
self.iri = iri
if not iri:
g = (None, None, None, None, None)
else:
m = IRI._p.match(iri)
if not m:
raise ValueError("not an IRI")
# remove default http and https ports
g = list(m.groups())
for scheme, suffix in IRI._default_ports:
if (g[0] == scheme) and g[1] and g[1].endswith(suffix):
g[1] = g[1][: g[1].rfind(":")]
break
self.scheme, self.authority, self.path, self.query, self.fragment = g
def __str__(self):
rval = ""
if self.scheme:
rval += self.scheme + ":"
if self.authority is not None:
rval += "//" + self.authority
if self.path is not None:
rval += self.path
if self.query is not None:
rval += "?" + self.query
if self.fragment is not None:
rval += "#" + self.fragment
return rval
def is_local_name(self):
if not all(
(
self.scheme is None,
self.authority is None,
self.path,
self.query is None,
self.fragment is None,
)
):
return False
if self.path.startswith(":") or "/" in self.path: # term is not ':x'
return False
return True
def is_prefix(self):
if not all((self.authority is None, self.query is None, self.fragment is None)):
return False
if self.scheme:
return self.path == "" # term is 'x:'
else:
return self.path == ":" # term is ':'
def is_prefixed_name(self):
if not all((self.authority is None, self.query is None, self.fragment is None)):
return False
if self.scheme:
return self.path != "" # term is 'x:y'
else: # term is ':y' but not ':'
return self.path and (self.path != ":") and self.path.startswith(":")
def resolve(self, iri):
"""Resolve a relative IRI to this IRI as a base."""
# parse the IRI if necessary
if isinstance(iri, str):
iri = IRI(iri)
elif not isinstance(iri, IRI):
raise TypeError("iri must be an IRI or a string")
# return an IRI object
rslt = IRI()
if iri.scheme and iri.scheme != self.scheme:
rslt.scheme = iri.scheme
rslt.authority = iri.authority
rslt.path = iri.path
rslt.query = iri.query
else:
rslt.scheme = self.scheme
if iri.authority is not None:
rslt.authority = iri.authority
rslt.path = iri.path
rslt.query = iri.query
else:
rslt.authority = self.authority
if not iri.path:
rslt.path = self.path
if iri.query is not None:
rslt.query = iri.query
else:
rslt.query = self.query
else:
if iri.path.startswith("/"):
# IRI represents an absolute path
rslt.path = iri.path
else:
# merge paths
path = self.path
# append relative path to the end of the last
# directory from base
path = path[0 : path.rfind("/") + 1]
if len(path) > 0 and not path.endswith("/"):
path += "/"
path += iri.path
rslt.path = path
rslt.query = iri.query
# normalize path
if rslt.path != "":
rslt.remove_dot_segments()
rslt.fragment = iri.fragment
return rslt
def remove_dot_segments(self):
# empty path shortcut
if len(self.path) == 0:
return
input_ = self.path.split("/")
output_ = []
while len(input_) > 0:
next = input_.pop(0)
done = len(input_) == 0
if next == ".":
if done:
# ensure output has trailing /
output_.append("")
continue
if next == "..":
if len(output_) > 0:
output_.pop()
if done:
# ensure output has trailing /
output_.append("")
continue
output_.append(next)
# ensure output has leading /
if len(output_) > 0 and output_[0] != "":
output_.insert(0, "")
        if len(output_) == 1 and output_[0] == "":
            # a path that collapses to the root must be written back too
            self.path = "/"
            return
self.path = "/".join(output_)
@bacpypes_debugging
class TagSet:
def index(self, name, value=None):
"""Find the first name with dictionary semantics or (name, value) with
list semantics."""
if _debug: TagSet._debug("index %r %r", name, value)
# if this is a NameValue rip it apart first
if isinstance(name, NameValue):
name, value = name.name, name.value
# no value then look for first matching name
if value is None:
for i, v in enumerate(self.value):
if isinstance(v, int):
continue
if name == v.name:
return i
else:
raise KeyError(name)
        # skip int values; an Array keeps one as its zeroth element, but a
        # list does not
for i, v in enumerate(self.value):
if isinstance(v, int):
continue
if (
name == v.name
and isinstance(value, type(v.value))
and value.value == v.value.value
):
return i
else:
raise ValueError((name, value))
def add(self, name, value=None):
"""Add a (name, value) with mutable set semantics."""
if _debug: TagSet._debug("add %r %r", name, value)
        # provide a Null if you are adding an is-a relationship; wrap strings
        # to be friendly
if value is None:
value = Null()
elif isinstance(value, str):
value = CharacterString(value)
# name is a string
if not isinstance(name, str):
raise TypeError("name must be a string, got %r" % (type(name),))
# reserved directive names
if name.startswith("@"):
if name == "@base":
if not isinstance(value, CharacterString):
raise TypeError("value must be an string")
v = self.get('@base')
if v and v.value == value.value:
pass
else:
raise ValueError("@base exists")
# if not iriref_re.match(value.value):
# raise ValueError("value must be an IRI")
elif name == "@id":
if not isinstance(value, CharacterString):
raise TypeError("value must be an string")
v = self.get('@id')
if v and v.value == value.value:
pass
else:
raise ValueError("@id exists")
# # check the patterns
# for pattern in (blank_node_re, prefixed_name_re, local_name_re, iriref_re):
# if pattern.match(value.value):
# break
# else:
# raise ValueError("invalid value for @id")
elif name == "@language":
if not isinstance(value, CharacterString):
raise TypeError("value must be an string")
v = self.get("@language")
if v and v.value == value.value:
pass
else:
raise ValueError("@language exists")
if not language_tag_re.match(value.value):
raise ValueError("value must be a language tag")
elif name == "@vocab":
if not isinstance(value, CharacterString):
raise TypeError("value must be an string")
v = self.get('@vocab')
if v and v.value == value.value:
pass
else:
raise ValueError("@vocab exists")
else:
raise ValueError("invalid directive name")
elif name.endswith(":"):
if not isinstance(value, CharacterString):
raise TypeError("value must be an string")
v = self.get(name)
if v and v.value == value.value:
pass
else:
raise ValueError("prefix exists: %r" % (name,))
# if not iriref_re.match(value.value):
# raise ValueError("value must be an IRI")
else:
# # check the patterns
# for pattern in (prefixed_name_re, local_name_re, iriref_re):
# if pattern.match(name):
# break
# else:
# raise ValueError("invalid name")
pass
# check the value
if not isinstance(value, (Atomic, DateTime)):
raise TypeError("invalid value")
# see if the (name, value) already exists
try:
self.index(name, value)
except ValueError:
super(TagSet, self).append(NameValue(name=name, value=value))
def discard(self, name, value=None):
"""Discard a (name, value) with mutable set semantics."""
if _debug: TagSet._debug("discard %r %r", name, value)
        # provide a Null if you are adding an is-a relationship; wrap strings
        # to be friendly
if value is None:
value = Null()
elif isinstance(value, str):
value = CharacterString(value)
indx = self.index(name, value)
return super(TagSet, self).__delitem__(indx)
def append(self, name_value):
"""Override the append operation for mutable set semantics."""
if _debug: TagSet._debug("append %r", name_value)
if not isinstance(name_value, NameValue):
raise TypeError
# turn this into an add operation
self.add(name_value.name, name_value.value)
def get(self, key, default=None):
"""Get the value of a key or default value if the key was not found,
dictionary semantics."""
if _debug: TagSet._debug("get %r %r", key, default)
try:
if not isinstance(key, str):
raise TypeError(key)
return self.value[self.index(key)].value
except KeyError:
return default
def __getitem__(self, item):
"""If item is an integer, return the value of the NameValue element
with array/sequence semantics. If the item is a string, return the
value with dictionary semantics."""
if _debug: TagSet._debug("__getitem__ %r", item)
# integers imply index
if isinstance(item, int):
return super(TagSet, self).__getitem__(item)
return self.value[self.index(item)]
def __setitem__(self, item, value):
"""If item is an integer, change the value of the NameValue element
with array/sequence semantics. If the item is a string, change the
current value or add a new value with dictionary semantics."""
if _debug: TagSet._debug("__setitem__ %r %r", item, value)
# integers imply index
if isinstance(item, int):
indx = item
if indx < 0:
raise IndexError("assignment index out of range")
elif isinstance(self, Array):
if indx == 0 or indx > len(self.value):
raise IndexError
elif indx >= len(self.value):
raise IndexError
elif isinstance(item, str):
try:
indx = self.index(item)
except KeyError:
self.add(item, value)
return
else:
raise TypeError(repr(item))
# check the value
if value is None:
value = Null()
elif not isinstance(value, (Atomic, DateTime)):
raise TypeError("invalid value")
# now we're good to go
self.value[indx].value = value
def __delitem__(self, item):
"""If the item is a integer, delete the element with array semantics, or
if the item is a string, delete the element with dictionary semantics,
or (name, value) with mutable set semantics."""
if _debug: TagSet._debug("__delitem__ %r", item)
# integers imply index
if isinstance(item, int):
indx = item
elif isinstance(item, str):
indx = self.index(item)
elif isinstance(item, tuple):
indx = self.index(*item)
else:
raise TypeError(item)
return super(TagSet, self).__delitem__(indx)
def __contains__(self, key):
if _debug: TagSet._debug("__contains__ %r", key)
try:
if isinstance(key, tuple):
self.index(*key)
elif isinstance(key, str):
self.index(key)
else:
raise TypeError(key)
return True
except (KeyError, ValueError):
return False
class ArrayOfNameValue(TagSet, ArrayOf(NameValue)):
pass
class SequenceOfNameValue(TagSet, SequenceOf(NameValue)):
pass
class TagsMixIn(Object):
properties = \
[ OptionalProperty('tags', ArrayOfNameValue)
]
@bacpypes_debugging
def Commandable(datatype, presentValue='presentValue', priorityArray='priorityArray', relinquishDefault='relinquishDefault'):
if _debug: Commandable._debug("Commandable %r ...", datatype)
class _Commando(object):
properties = [
WritableProperty(presentValue, datatype),
ReadableProperty(priorityArray, PriorityArray),
ReadableProperty(relinquishDefault, datatype),
]
_pv_choice = None
def __init__(self, **kwargs):
super(_Commando, self).__init__(**kwargs)
# build a default value in case one is needed
default_value = datatype().value
if issubclass(datatype, Enumerated):
default_value = datatype._xlate_table[default_value]
if _debug: Commandable._debug(" - default_value: %r", default_value)
# see if a present value was provided
if (presentValue not in kwargs):
setattr(self, presentValue, default_value)
# see if a priority array was provided
if (priorityArray not in kwargs):
setattr(self, priorityArray, PriorityArray())
# see if a present value was provided
if (relinquishDefault not in kwargs):
setattr(self, relinquishDefault, default_value)
def _highest_priority_value(self):
if _debug: Commandable._debug("_highest_priority_value")
priority_array = getattr(self, priorityArray)
for i in range(1, 17):
priority_value = priority_array[i]
if priority_value.null is None:
if _debug: Commandable._debug(" - found at index: %r", i)
value = getattr(priority_value, _Commando._pv_choice)
value_source = "###"
if issubclass(datatype, Enumerated):
value = datatype._xlate_table[value]
if _debug: Commandable._debug(" - remapped enumeration: %r", value)
break
else:
value = getattr(self, relinquishDefault)
value_source = None
if _debug: Commandable._debug(" - value, value_source: %r, %r", value, value_source)
# return what you found
return value, value_source
def WriteProperty(self, property, value, arrayIndex=None, priority=None, direct=False):
if _debug: Commandable._debug("WriteProperty %r %r arrayIndex=%r priority=%r direct=%r", property, value, arrayIndex, priority, direct)
# when writing to the presentValue with a priority
if (property == presentValue):
if _debug: Commandable._debug(" - writing to %s, priority %r", presentValue, priority)
# default (lowest) priority
if priority is None:
priority = 16
if _debug: Commandable._debug(" - translate to priority array, index %d", priority)
# translate to updating the priority array
property = priorityArray
arrayIndex = priority
priority = None
# update the priority array entry
if (property == priorityArray):
if (arrayIndex is None):
if _debug: Commandable._debug(" - writing entire %s", priorityArray)
# pass along the request
super(_Commando, self).WriteProperty(
property, value,
arrayIndex=arrayIndex, priority=priority, direct=direct,
)
else:
if _debug: Commandable._debug(" - writing to %s, array index %d", priorityArray, arrayIndex)
# check the bounds
if arrayIndex == 0:
raise ExecutionError(errorClass='property', errorCode='writeAccessDenied')
if (arrayIndex < 1) or (arrayIndex > 16):
raise ExecutionError(errorClass='property', errorCode='invalidArrayIndex')
                    # update the specific priority value element
priority_value = getattr(self, priorityArray)[arrayIndex]
if _debug: Commandable._debug(" - priority_value: %r", priority_value)
# the null or the choice has to be set, the other clear
if value == ():
if _debug: Commandable._debug(" - write a null")
priority_value.null = value
setattr(priority_value, _Commando._pv_choice, None)
else:
if _debug: Commandable._debug(" - write a value")
if issubclass(datatype, Enumerated):
value = datatype._xlate_table[value]
if _debug: Commandable._debug(" - remapped enumeration: %r", value)
priority_value.null = None
setattr(priority_value, _Commando._pv_choice, value)
# look for the highest priority value
value, value_source = self._highest_priority_value()
# compare with the current value
current_value = getattr(self, presentValue)
if value == current_value:
if _debug: Commandable._debug(" - no present value change")
return
# turn this into a present value change
property = presentValue
arrayIndex = priority = None
# allow the request to pass through
if _debug: Commandable._debug(" - super: %r %r arrayIndex=%r priority=%r", property, value, arrayIndex, priority)
super(_Commando, self).WriteProperty(
property, value,
arrayIndex=arrayIndex, priority=priority, direct=direct,
)
# look up a matching priority value choice
for element in PriorityValue.choiceElements:
if issubclass(datatype, element.klass):
_Commando._pv_choice = element.name
break
else:
_Commando._pv_choice = 'constructedValue'
if _debug: Commandable._debug(" - _pv_choice: %r", _Commando._pv_choice)
# return the class
return _Commando
#
# MinOnOffTask
#
@bacpypes_debugging
class MinOnOffTask(OneShotTask):
def __init__(self, binary_obj):
if _debug: MinOnOffTask._debug("__init__ %s", repr(binary_obj))
OneShotTask.__init__(self)
# save a reference to the object
self.binary_obj = binary_obj
# listen for changes to the present value
self.binary_obj._property_monitors['presentValue'].append(self.present_value_change)
def present_value_change(self, old_value, new_value):
if _debug: MinOnOffTask._debug("present_value_change %r %r", old_value, new_value)
# if there's no value change, skip all this
if old_value == new_value:
if _debug: MinOnOffTask._debug(" - no state change")
return
# get the minimum on/off time
if new_value == 'inactive':
task_delay = getattr(self.binary_obj, 'minimumOnTime') or 0
if _debug: MinOnOffTask._debug(" - minimum on: %r", task_delay)
elif new_value == 'active':
task_delay = getattr(self.binary_obj, 'minimumOffTime') or 0
if _debug: MinOnOffTask._debug(" - minimum off: %r", task_delay)
else:
raise ValueError("unrecognized present value for %r: %r" % (self.binary_obj.objectIdentifier, new_value))
# if there's no delay, don't bother
if not task_delay:
if _debug: MinOnOffTask._debug(" - no delay")
return
# set the value at priority 6
self.binary_obj.WriteProperty('presentValue', new_value, priority=6)
# install this to run, if there is a delay
self.install_task(delta=task_delay)
def process_task(self):
if _debug: MinOnOffTask._debug("process_task(%s)", self.binary_obj.objectName)
# clear the value at priority 6
self.binary_obj.WriteProperty('presentValue', (), priority=6)
#
# MinOnOff
#
@bacpypes_debugging
class MinOnOff(object):
def __init__(self, **kwargs):
if _debug: MinOnOff._debug("__init__ ...")
super(MinOnOff, self).__init__(**kwargs)
# create the timer task
self._min_on_off_task = MinOnOffTask(self)
#
# Commandable Standard Objects
#
class AccessDoorCmdObject(Commandable(DoorValue), AccessDoorObject):
pass
class AnalogOutputCmdObject(Commandable(Real), AnalogOutputObject):
pass
class AnalogValueCmdObject(Commandable(Real), AnalogValueObject):
pass
### class BinaryLightingOutputCmdObject(Commandable(Real), BinaryLightingOutputObject):
### pass
class BinaryOutputCmdObject(Commandable(BinaryPV), MinOnOff, BinaryOutputObject):
pass
class BinaryValueCmdObject(Commandable(BinaryPV), MinOnOff, BinaryValueObject):
pass
class BitStringValueCmdObject(Commandable(BitString), BitStringValueObject):
pass
class CharacterStringValueCmdObject(Commandable(CharacterString), CharacterStringValueObject):
pass
class DateValueCmdObject(Commandable(Date), DateValueObject):
pass
class DatePatternValueCmdObject(Commandable(Date), DatePatternValueObject):
pass
class DateTimeValueCmdObject(Commandable(DateTime), DateTimeValueObject):
pass
class DateTimePatternValueCmdObject(Commandable(DateTime), DateTimePatternValueObject):
pass
class IntegerValueCmdObject(Commandable(Integer), IntegerValueObject):
pass
class LargeAnalogValueCmdObject(Commandable(Double), LargeAnalogValueObject):
pass
class LightingOutputCmdObject(Commandable(Real), LightingOutputObject):
pass
class MultiStateOutputCmdObject(Commandable(Unsigned), MultiStateOutputObject):
pass
class MultiStateValueCmdObject(Commandable(Unsigned), MultiStateValueObject):
pass
class OctetStringValueCmdObject(Commandable(OctetString), OctetStringValueObject):
pass
class PositiveIntegerValueCmdObject(Commandable(Unsigned), PositiveIntegerValueObject):
pass
class TimeValueCmdObject(Commandable(Time), TimeValueObject):
pass
class TimePatternValueCmdObject(Commandable(Time), TimePatternValueObject):
pass
@bacpypes_debugging
class ChannelValueProperty(Property):
def __init__(self):
if _debug: ChannelValueProperty._debug("__init__")
Property.__init__(self, 'presentValue', ChannelValue, default=None, optional=False, mutable=True)
def WriteProperty(self, obj, value, arrayIndex=None, priority=None, direct=False):
if _debug: ChannelValueProperty._debug("WriteProperty %r %r arrayIndex=%r priority=%r direct=%r", obj, value, arrayIndex, priority, direct)
### Clause 12.53.5, page 487
raise NotImplementedError()
class ChannelCmdObject(ChannelObject):
properties = [
ChannelValueProperty(),
]
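# Hedged usage sketch (not part of the original module): how one of the
# commandable objects above is typically driven. The object identifier and
# values are illustrative assumptions, and the sketch assumes a bacpypes
# version whose PriorityArray initializes all sixteen slots.
def _commandable_usage_sketch():
    avo = AnalogValueCmdObject(
        objectIdentifier=('analogValue', 1),
        objectName='AV-1',
        presentValue=0.0,
        )
    # a prioritized write lands in the matching priority array slot; the
    # present value becomes the highest-priority (lowest index) non-null entry
    avo.WriteProperty('presentValue', 42.0, priority=8)
    # writing an empty tuple relinquishes the slot; with no other slots set,
    # the present value falls back to relinquishDefault
    avo.WriteProperty('presentValue', (), priority=8)
    return avo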
|
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from json import dumps, load
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from storage import Storage
from utils import logger, hash_decode, hash_encode, Hash, header_from_string, header_to_string, ProfiledThread, \
rev_hex, int_to_hex4
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
# monitoring
self.avg_time = 0,0,0
self.time_ref = time.time()
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.merkle_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_fees = {}
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {} # addr -> (txid, delta)
self.mempool_unconfirmed = {} # txid -> set of unconfirmed inputs
self.mempool_hashes = set()
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.digibyted_url = 'http://%s:%s@%s:%s/' % (
config.get('digibyted', 'digibyted_user'),
config.get('digibyted', 'digibyted_password'),
config.get('digibyted', 'digibyted_host'),
config.get('digibyted', 'digibyted_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
# start catch_up thread
if config.getboolean('leveldb', 'profiler'):
filename = os.path.join(config.get('leveldb', 'path'), 'profile')
print_log('profiled thread', filename)
self.blockchain_thread = ProfiledThread(filename, target = self.do_catch_up)
else:
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.digibyted('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
if not self.shared.stopped():
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("digibyted is responding")
self.shared.unpause()
time.sleep(10)
def set_time(self):
self.time_ref = time.time()
def print_time(self, num_tx):
delta = time.time() - self.time_ref
# leaky averages
seconds_per_block, tx_per_second, n = self.avg_time
alpha = (1. + 0.01 * n)/(n+1)
seconds_per_block = (1-alpha) * seconds_per_block + alpha * delta
alpha2 = alpha * delta / seconds_per_block
tx_per_second = (1-alpha2) * tx_per_second + alpha2 * num_tx / delta
self.avg_time = seconds_per_block, tx_per_second, n+1
if self.storage.height%100 == 0 \
or (self.storage.height%10 == 0 and self.storage.height >= 300000)\
or self.storage.height >= 1000000:
msg = "block %d (%d %.2fs) %s" %(self.storage.height, num_tx, delta, self.storage.get_root_hash().encode('hex'))
msg += " (%.2ftx/s, %.2fs/block)" % (tx_per_second, seconds_per_block)
run_blocks = self.storage.height - self.start_catchup_height
remaining_blocks = self.digibyted_height - self.storage.height
if run_blocks>0 and remaining_blocks>0:
remaining_minutes = remaining_blocks * seconds_per_block / 60
new_blocks = int(remaining_minutes / 10) # number of new blocks expected during catchup
blocks_to_process = remaining_blocks + new_blocks
minutes = blocks_to_process * seconds_per_block / 60
rt = "%.0fmin"%minutes if minutes < 300 else "%.1f hours"%(minutes/60)
msg += " (eta %s, %d blocks)" % (rt, remaining_blocks)
print_log(msg)
def wait_on_digibyted(self):
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise BaseException()
def digibyted(self, method, params=()):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
response = urllib.urlopen(self.digibyted_url, postdata)
r = load(response)
response.close()
except:
print_log("cannot reach digibyted...")
self.wait_on_digibyted()
else:
if r['error'] is not None:
if r['error'].get('code') == -28:
print_log("digibyted still warming up...")
self.wait_on_digibyted()
continue
raise BaseException(r['error'])
break
return r.get('result')
@staticmethod
def block2header(b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
def get_header(self, height):
block_hash = self.digibyted('getblockhash', (height,))
b = self.digibyted('getblock', (block_hash,))
return self.block2header(b)
def init_headers(self, db_height):
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
@staticmethod
def hash_header(header):
return rev_hex(Hash(header_to_string(header).decode('hex')).encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if chunk_index in self.chunk_cache:
del self.chunk_cache[chunk_index]
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
self.headers_data = self.headers_data[:-40]
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
# store them on disk; store the current chunk in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
if chunk:
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.digibyted('getrawtransaction', (txid, 0))
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_unconfirmed_history(self, addr):
hist = []
with self.mempool_lock:
for tx_hash, delta in self.mempool_hist.get(addr, ()):
height = -1 if self.mempool_unconfirmed.get(tx_hash) else 0
fee = self.mempool_fees.get(tx_hash)
hist.append({'tx_hash':tx_hash, 'height':height, 'fee':fee})
return hist
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
hist = self.storage.get_history(addr)
hist.extend(self.get_unconfirmed_history(addr))
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
v = 0
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, ()):
v += delta
return v
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
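        # Electrum address status: sha256 over the concatenation of
        # "<tx_hash>:<height>:" for every history entry, hex-encoded.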
status = ''.join(tx.get('tx_hash') + ':%d:' % tx.get('height') for tx in tx_points)
return hashlib.sha256(status).digest().encode('hex')
def get_merkle(self, tx_hash, height, cache_only):
with self.cache_lock:
out = self.merkle_cache.get(tx_hash)
if out is not None:
return out
if cache_only:
return -1
block_hash = self.digibyted('getblockhash', (height,))
b = self.digibyted('getblock', (block_hash,))
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
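        # Build the SPV merkle branch: at each level pair adjacent hashes
        # (duplicating the last hash when a level has an odd count) and
        # record the sibling of the node containing target_hash, so a client
        # can recompute the merkle root from the transaction hash alone.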
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
out = {"block_height": height, "merkle": s, "pos": tx_pos}
with self.cache_lock:
if len(self.merkle_cache) > self.max_cache_size:
logger.info("clearing merkle cache")
self.merkle_cache.clear()
self.merkle_cache[tx_hash] = out
return out
@staticmethod
def deserialize_block(block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, revert=False):
touched_addr = set()
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.digibyted_height, undo_info)
        # record the new best block hash and height
self.storage.save_height(block_hash, block_height)
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
# batch write modified nodes
self.storage.batch_write()
# return length for monitoring
return len(tx_hashes)
def add_request(self, session, request):
        # see if we can get it from the cache; if not, add the request to the queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
del self.watched_addresses[addr]
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', ())
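        # Requests follow the Electrum protocol, e.g.:
        #   {'id': 0, 'method': 'blockchain.address.get_balance',
        #    'params': ['<address>']}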
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex4(pos)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.digibyted('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
except BaseException, e:
error = e.args[0]
if error["code"] == -26:
# If we return anything that's not the transaction hash,
# it's considered an error message
message = error["message"]
if "non-mandatory-script-verify-flag" in message:
result = "Your client produced a transaction that is not accepted by the Digibyte network any more. Please upgrade to Electrum 2.5.1 or newer\n"
else:
result = "The transaction was rejected by network rules.(" + message + ")\n" \
"[" + params[0] + "]"
else:
result = error["message"] # do send an error
print_log("error:", result)
elif method == 'blockchain.transaction.get_merkle':
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height, cache_only)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.digibyted('getrawtransaction', (tx_hash, 0))
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.digibyted('estimatefee', (num,))
elif method == 'blockchain.relayfee':
result = self.relayfee
else:
raise BaseException("unknown method:%s" % method)
return result
def get_block(self, block_hash):
block = self.digibyted('getblock', (block_hash,))
rawtxreq = []
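        # Batch all getrawtransaction calls for this block into a single
        # JSON-RPC POST, so the block's transactions are fetched in one round trip.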
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": (txid,),
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
while True:
try:
response = urllib.urlopen(self.digibyted_url, postdata)
r = load(response)
response.close()
except:
logger.error("digibyted error (getfullblock)")
self.wait_on_digibyted()
continue
try:
rawtxdata = []
for ir in r:
assert ir['error'] is None, "Error: make sure you run digibyted with txindex=1; use -reindex if needed."
rawtxdata.append(ir['result'])
except BaseException as e:
logger.error(str(e))
self.wait_on_digibyted()
continue
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
self.start_catchup_height = self.storage.height
prev_root_hash = None
n = 0
while not self.shared.stopped():
# are we done yet?
info = self.digibyted('getinfo')
self.relayfee = info.get('relayfee')
self.digibyted_height = info.get('blocks')
digibyted_block_hash = self.digibyted('getblockhash', (self.digibyted_height,))
if self.storage.last_hash == digibyted_block_hash:
self.up_to_date = True
break
self.set_time()
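            # when test_reorgs is enabled, randomly force a revert (about 1%
            # of iterations) to exercise the reorg handling below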
revert = (random.randint(1, 100) == 1) if self.test_reorgs and self.storage.height>100 else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.digibyted('getblockhash', (self.storage.height + 1,))
except BaseException, e:
revert = True
next_block = self.get_block(next_block_hash if not revert else self.storage.last_hash)
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
n = self.import_block(next_block, next_block_hash, self.storage.height+1)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
else:
# revert current block
block = self.get_block(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
n = self.import_block(block, self.storage.last_hash, self.storage.height, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
# print time
self.print_time(n)
self.header = self.block2header(self.digibyted('getblock', (self.storage.last_hash,)))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
t0 = time.time()
mempool_hashes = set(self.digibyted('getrawmempool'))
touched_addresses = set()
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
out_sum = 0
for x in tx.get('outputs'):
addr = x.get('address', '')
value = x['value']
out_values.append((addr, value))
if not addr:
continue
v = mpa.get(addr, 0)
v += value
mpa[addr] = v
touched_addresses.add(addr)
out_sum += value
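            # fee bookkeeping: start at -sum(outputs); the matching input
            # values are added below, giving fee = sum(inputs) - sum(outputs)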
self.mempool_fees[tx_hash] = -out_sum
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
self.mempool_unconfirmed[tx_hash] = set()
# check all inputs
for tx_hash, tx in new_tx.iteritems():
mpa = self.mempool_addresses.get(tx_hash, {})
# are we spending unconfirmed inputs?
input_sum = 0
for x in tx.get('inputs'):
prev_hash = x.get('prevout_hash')
prev_n = x.get('prevout_n')
mpv = self.mempool_values.get(prev_hash)
if mpv:
addr, value = mpv[prev_n]
self.mempool_unconfirmed[tx_hash].add(prev_hash)
else:
txi = (prev_hash + int_to_hex4(prev_n)).decode('hex')
try:
addr = self.storage.get_address(txi)
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
# we can proceed
input_sum += value
if not addr:
continue
v = mpa.get(addr, 0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
self.mempool_fees[tx_hash] += input_sum
        # remove stale entries (txs no longer in the mempool) from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
del self.mempool_addresses[tx_hash]
del self.mempool_values[tx_hash]
del self.mempool_unconfirmed[tx_hash]
del self.mempool_fees[tx_hash]
touched_addresses.update(addresses)
        # remove stale entries from mempool_hist
new_mempool_hist = {}
for addr in self.mempool_hist.iterkeys():
h = self.mempool_hist[addr]
hh = []
for tx_hash, delta in h:
if tx_hash in self.mempool_addresses:
hh.append((tx_hash, delta))
if hh:
new_mempool_hist[addr] = hh
# add new transactions to mempool_hist
for tx_hash in new_tx.iterkeys():
addresses = self.mempool_addresses[tx_hash]
for addr, delta in addresses.iteritems():
h = new_mempool_hist.get(addr, [])
if (tx_hash, delta) not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
t1 = time.time()
if t1-t0>1:
print_log('mempool_update', t1-t0, len(self.mempool_hashes), len(self.mempool_hist))
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
# print_log("cache: invalidating", address)
del self.history_cache[address]
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
self.catch_up()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': (self.storage.height,),
})
if self.sent_header != self.header:
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': (self.header,),
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': (addr, status),
})
|
|
"""
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['RankWarning', 'PolyError', 'PolyDomainError', 'as_series',
'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms',
'PolyBase']
import warnings
import numpy as np
import sys
#
# Warnings and Exceptions
#
class RankWarning(UserWarning) :
"""Issued by chebfit when the design matrix is rank deficient."""
pass
class PolyError(Exception) :
"""Base class for errors in this module."""
pass
class PolyDomainError(PolyError) :
"""Issued by the generic Poly class when two domains don't match.
This is raised when an binary operation is passed Poly objects with
different domains.
"""
pass
#
# Base class for all polynomial types
#
class PolyBase(object) :
"""
Base class for all polynomial types.
Deprecated in numpy 1.9.0, use the abstract
ABCPolyBase class instead. Note that the latter
    requires a number of virtual functions to be
implemented.
"""
pass
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq) :
"""Remove small Poly series coefficients.
Parameters
----------
seq : sequence
Sequence of Poly series coefficients. This routine fails for
empty sequences.
Returns
-------
series : sequence
Subsequence with trailing zeros removed. If the resulting sequence
would be empty, return the first element. The returned sequence may
or may not be a view.
Notes
-----
Do not lose the type info if the sequence contains unknown objects.
"""
if len(seq) == 0 :
return seq
else :
for i in range(len(seq) - 1, -1, -1) :
if seq[i] != 0 :
break
return seq[:i+1]
def as_series(alist, trim=True) :
"""
Return argument as a list of 1-d arrays.
The returned list contains array(s) of dtype double, complex double, or
object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
array.
Parameters
----------
a : array_like
A 1- or 2-d array_like
trim : boolean, optional
When True, trailing zeros are removed from the inputs.
When False, the inputs are passed through intact.
Returns
-------
[a1, a2,...] : list of 1-D arrays
A copy of the input data as a list of 1-d arrays.
Raises
------
ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
Examples
--------
>>> from numpy import polynomial as P
>>> a = np.arange(4)
>>> P.as_series(a)
[array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])]
>>> b = np.arange(6).reshape((2,3))
>>> P.as_series(b)
[array([ 0., 1., 2.]), array([ 3., 4., 5.])]
"""
arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
if min([a.size for a in arrays]) == 0 :
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]) :
raise ValueError("Coefficient array is not 1-d")
if trim :
arrays = [trimseq(a) for a in arrays]
if any([a.dtype == np.dtype(object) for a in arrays]) :
ret = []
for a in arrays :
if a.dtype != np.dtype(object) :
tmp = np.empty(len(a), dtype=np.dtype(object))
tmp[:] = a[:]
ret.append(tmp)
else :
ret.append(a.copy())
else :
try :
dtype = np.common_type(*arrays)
except :
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
return ret
def trimcoef(c, tol=0) :
"""
Remove "small" "trailing" coefficients from a polynomial.
"Small" means "small in absolute value" and is controlled by the
parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
both the 3-rd and 4-th order coefficients would be "trimmed."
Parameters
----------
c : array_like
1-d array of coefficients, ordered from lowest order to highest.
tol : number, optional
Trailing (i.e., highest order) elements with absolute value less
than or equal to `tol` (default value is zero) are removed.
Returns
-------
trimmed : ndarray
1-d array with trailing zeros removed. If the resulting series
would be empty, a series containing a single zero is returned.
Raises
------
ValueError
If `tol` < 0
See Also
--------
trimseq
Examples
--------
>>> from numpy import polynomial as P
>>> P.trimcoef((0,0,3,0,5,0,0))
array([ 0., 0., 3., 0., 5.])
>>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([ 0.])
>>> i = complex(0,1) # works for complex
>>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([ 0.0003+0.j , 0.0010-0.001j])
"""
if tol < 0 :
raise ValueError("tol must be non-negative")
[c] = as_series([c])
[ind] = np.where(np.abs(c) > tol)
if len(ind) == 0 :
return c[:1]*0
else :
return c[:ind[-1] + 1].copy()
def getdomain(x) :
"""
Return a domain suitable for given abscissae.
Find a domain suitable for a polynomial or Chebyshev series
defined at the values supplied.
Parameters
----------
x : array_like
1-d array of abscissae whose domain will be determined.
Returns
-------
domain : ndarray
1-d array containing two values. If the inputs are complex, then
the two returned points are the lower left and upper right corners
of the smallest rectangle (aligned with the axes) in the complex
plane containing the points `x`. If the inputs are real, then the
two points are the ends of the smallest interval containing the
points `x`.
See Also
--------
mapparms, mapdomain
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> points = np.arange(4)**2 - 5; points
array([-5, -4, -1, 4])
>>> pu.getdomain(points)
array([-5., 4.])
>>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle
>>> pu.getdomain(c)
array([-1.-1.j, 1.+1.j])
"""
[x] = as_series([x], trim=False)
if x.dtype.char in np.typecodes['Complex'] :
rmin, rmax = x.real.min(), x.real.max()
imin, imax = x.imag.min(), x.imag.max()
return np.array((complex(rmin, imin), complex(rmax, imax)))
else :
return np.array((x.min(), x.max()))
def mapparms(old, new) :
"""
Linear map parameters between domains.
Return the parameters of the linear map ``offset + scale*x`` that maps
`old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.
Parameters
----------
old, new : array_like
Domains. Each domain must (successfully) convert to a 1-d array
containing precisely two values.
Returns
-------
offset, scale : scalars
The map ``L(x) = offset + scale*x`` maps the first domain to the
second.
See Also
--------
getdomain, mapdomain
Notes
-----
Also works for complex numbers, and thus can be used to calculate the
parameters required to map any line in the complex plane to any other
line therein.
Examples
--------
>>> from numpy import polynomial as P
>>> P.mapparms((-1,1),(-1,1))
(0.0, 1.0)
>>> P.mapparms((1,-1),(-1,1))
(0.0, -1.0)
>>> i = complex(0,1)
>>> P.mapparms((-i,-1),(1,i))
((1+1j), (1+0j))
"""
oldlen = old[1] - old[0]
newlen = new[1] - new[0]
off = (old[1]*new[0] - old[0]*new[1])/oldlen
scl = newlen/oldlen
return off, scl
def mapdomain(x, old, new) :
"""
Apply linear map to input points.
The linear map ``offset + scale*x`` that maps the domain `old` to
the domain `new` is applied to the points `x`.
Parameters
----------
x : array_like
Points to be mapped. If `x` is a subtype of ndarray the subtype
will be preserved.
old, new : array_like
The two domains that determine the map. Each must (successfully)
convert to 1-d arrays containing precisely two values.
Returns
-------
x_out : ndarray
Array of points of the same shape as `x`, after application of the
linear map between the two domains.
See Also
--------
getdomain, mapparms
Notes
-----
Effectively, this implements:
.. math ::
x\\_out = new[0] + m(x - old[0])
where
.. math ::
m = \\frac{new[1]-new[0]}{old[1]-old[0]}
Examples
--------
>>> from numpy import polynomial as P
>>> old_domain = (-1,1)
>>> new_domain = (0,2*np.pi)
>>> x = np.linspace(-1,1,6); x
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
>>> x_out = P.mapdomain(x, old_domain, new_domain); x_out
array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825,
6.28318531])
>>> x - P.mapdomain(x_out, new_domain, old_domain)
array([ 0., 0., 0., 0., 0., 0.])
Also works for complex numbers (and thus can be used to map any line in
the complex plane to any other line therein).
>>> i = complex(0,1)
>>> old = (-1 - i, 1 + i)
>>> new = (-1 + i, 1 - i)
>>> z = np.linspace(old[0], old[1], 6); z
array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ])
>>> new_z = P.mapdomain(z, old, new); new_z
array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ])
"""
x = np.asanyarray(x)
off, scl = mapparms(old, new)
return off + scl*x
|
|
# Encoding: UTF-8
"""Tools to manage translation files
To use, create a ForrinTools object and pass it a Translator class.
Then, call the run method.
"""
from __future__ import print_function, unicode_literals, division
import os
import argparse
import textwrap
import six
import pkg_resources
import polib
import forrin.extract
from forrin.message import POTFile
def yield_messages(source_dir, printer=lambda *a, **ka: None):
"""Yield messages from all Python sources in the source_dir tree
Feel free to use this as an example
"""
for dirpath, dirnames, filenames in os.walk(source_dir):
for filename in filenames:
if filename.endswith('.py'):
filepath = os.path.join(dirpath, filename)
printer('Extracting from %s' % filepath)
for message in forrin.extract.extract_python(filepath):
yield message
class ForrinTools(object):
"""A set of tools suite for managing translations
"""
def __init__(self, translator_class, source_dir=None):
self.translator_class = translator_class
translator = self.translator = translator_class()
self.i18n_dir = translator.i18n_directory
self.domain = translator.domain
self.pot_name = os.path.join(self.i18n_dir, '%s.pot' % self.domain)
if not source_dir:
source_dir = pkg_resources.resource_filename(
self.translator.package, '')
self.source_dir = source_dir
def yield_messages(self, printer=lambda *a, **ka: None):
"""Yield all messages for this domain"""
return yield_messages(self.source_dir, printer)
def extract(self, args):
"""Extract messages to a .pot file, then merge to languages
Returns the POTFile created
"""
args.printer('Extracting source messages')
pot = POTFile(project_name=self.domain)
for message in yield_messages(self.source_dir, args.printer):
pot.add(message)
return pot
def get_pot(self, args):
if args.cached_pot:
args.printer('Reading translations from %s' % self.pot_name)
return polib.pofile(self.pot_name)
else:
return self.extract(args)
def get_langs(self, args):
if args.langs is None:
return list(self.translator.available_languages())
else:
return [l for l in args.langs.split(',') if l]
def merge(self, args):
"""Merge the source pot file with individual language files
"""
pot = self.get_pot(args)
for lang in self.get_langs(args):
po_path = os.path.join(self.i18n_dir, '%s.po' % lang)
if os.path.exists(po_path):
args.printer('Merging translations to %s' % po_path)
po = polib.pofile(po_path)
po.merge(pot)
else:
args.printer('Creating new translation file %s' % po_path)
po = polib.POFile()
po.merge(pot)
yield po, lang, po_path
def strip(self, args):
"""Strip the per-language .po files to remove extraneous data
The resulting files are suitable for version control systems.
"""
pos_with_info = self.merge(args)
for po, lang, po_path in pos_with_info:
args.printer('Stripping translations in %s' % po_path)
po[:] = [m for m in po if m.msgstr]
for message in po:
message.occurrences = []
message.flags = []
message.comment = message.tcomment = ''
yield po, lang, po_path
def save_pos(self, pos_with_info, args):
for po, lang, po_path in pos_with_info:
args.printer('Saving %s' % po_path)
po.save(po_path)
def print_stats(self, pos_with_info, args):
bar_len = 50
template = '{lang:5} {bar} {percent:3} {transl:{num_width}}/{all:{num_width}} {path}'
pos_with_info = list(pos_with_info)
results = []
for po, lang, po_path in pos_with_info:
n_translated = len(po.translated_entries())
n_all = len(po)
num_width = len(str(n_all))
try:
completion = n_translated / n_all
percent = '{0:2d}%'.format(int(round(100 * completion)))
if n_translated == n_all:
percent = 'ALL'
bar_full = int(round(bar_len * completion))
bar = '[{0:{1}}]'.format('=' * bar_full, bar_len)
except ZeroDivisionError:
completion = 0
percent = 'N/A'
bar = '[{0}]'.format(' ' * bar_len)
results.append(dict(
lang=lang, bar=bar, percent=percent, transl=n_translated,
all=n_all, num_width=num_width, path=po_path))
        results.sort(key=lambda d: (-d['transl'], d['lang']))
for result in results:
print(template.format(**result))
def run(self, argv):
"""Run as a command-line program
:param argv: sys.argv
"""
parser = argparse.ArgumentParser(prog=os.path.basename(argv[0]),
description='Manage translations for %s' % self.domain,
formatter_class=argparse.RawTextHelpFormatter,
add_help=False)
parser.add_argument('action', metavar='ACTION',
help=textwrap.dedent("""
The action to take:
update
Update .po files to be ready for translation.
                To create a new .po file, give a new
language code to the --langs option.
strip
Remove unnecessary information from .po
files, making them suitable for committing to
a version control system.
Use `update` to "expand" the files again.
extract
Extract source messages to a .pot file.
That file can be used with translation tools,
or to speed up the update later.
Use --existing-pot with other actions to use
an existing pot file.
stats
Print stats about available translations
""").strip(),
default='help', nargs='?', type=six.text_type)
parser.add_argument('-h', '--help', dest='action',
action='store_const', const='help',
help=textwrap.dedent("""
Show this help message and exit.
""").strip())
parser.add_argument('-l', '--langs', metavar='LANGS',
help=textwrap.dedent("""
Identifiers of languages to use, comma-separated.
Examples: `-l en`, `-l en,cs,de`
Default: all currently available
""").strip(),
type=six.text_type)
parser.add_argument('--existing-pot',
help=textwrap.dedent("""
Use an existing pot file.
By default, fresh messages are extracted from the
source code each time they're used. With this flag,
they are read from an existing pot file.
""").strip(),
dest='cached_pot', action='store_const', const=True, default=False)
parser.add_argument('-n', '--dry-run',
help=textwrap.dedent("""
Do not write any files, just show what would be done
""").strip(),
dest='write', action='store_const', const=False, default=True)
parser.add_argument('-q', '--quiet',
help=textwrap.dedent("""
Don't print out progress messages.
""").strip(),
dest='printer', action='store_const',
const=lambda *a, **ka: None, default=print)
args = parser.parse_args(argv[1:])
action = args.action
if action == 'help':
parser.print_help()
exit(1)
elif action == 'extract':
pot = self.extract(args)
if args.write:
args.printer('Saving pot file %s' % self.pot_name)
pot.save(self.pot_name)
elif action == 'update':
self.save_pos(self.merge(args), args)
elif action == 'strip':
self.save_pos(self.strip(args), args)
elif action == 'stats':
args.printer = lambda *a, **ka: None
self.print_stats(self.merge(args), args)
else:
parser.error('Unknown action')
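
# Minimal usage sketch (illustrative only; ``MyTranslator`` stands for a
# hypothetical forrin Translator subclass providing ``package``, ``domain``
# and ``i18n_directory``):
#
#   import sys
#   from myapp.i18n import MyTranslator  # hypothetical
#
#   if __name__ == '__main__':
#       ForrinTools(MyTranslator).run(sys.argv)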
|
|
#!/usr/bin/env python
# coding: utf-8
# pylint: disable=C0111
# pylint: disable=C0103
# pylint: disable=C0330
from __future__ import print_function
import os
import sys
import argparse
import traceback
import webbrowser
import re
import json
import multiprocessing
from multiprocessing.pool import ThreadPool
from time import strftime
import dateparser
import pytz
import tweepy
from ebooklib import epub
VERSION = '1.1'
CONFIG_FILE = '.shut-up-bird.conf'
ARCHIVES_DIR = './shut-up-bird.arch'
TWEETS_EPUB = 'tweets.epub'
LIKES_EPUB = 'likes.epub'
PAR_TWEET = u'<blockquote class="ieverse"><p style="text-align:center;">\
<span style="text-align:left;display:inline-block;">{0}</span></p>\
</blockquote>'
#############################################################################
# Tweepy routines
def tweep_login(consumer_key, consumer_secret, token='', secret=''):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
if token and secret:
auth.set_access_token(token, secret)
else:
try:
print ("Authenticating ...please wait")
redirect_url = auth.get_authorization_url()
print ("Opening url - {0}".format(redirect_url))
webbrowser.open(redirect_url)
verify_code = get_input("Verification PIN code: ")
auth.get_access_token(verify_code)
except tweepy.TweepError as e:
raise Exception("Failed to get request token!", e)
return auth
def tweep_getAPI(auth):
api = tweepy.API(auth)
verbose("Authenticated as: {0}".format(api.me().screen_name))
limits = api.rate_limit_status()
statuses = limits['resources']['statuses']
verbose("Rates left:")
verbose("\tUser timeline: {0} / {1}".format(
statuses['/statuses/user_timeline']['remaining'],
statuses['/statuses/user_timeline']['limit']))
verbose("\tLookup: {0} / {1}".format(
statuses['/statuses/lookup']['remaining'],
statuses['/statuses/lookup']['limit']))
# verbose("\tMentions timeline: {0} / {1}".format(
# statuses['/statuses/mentions_timeline']['remaining'],
# statuses['/statuses/mentions_timeline']['limit']))
# verbose("\tRetweets: {0} / {1}".format(
# statuses['/statuses/retweets/:id']['remaining'],
# statuses['/statuses/retweets/:id']['limit']))
return api
def tweep_archive_tweets(api, max_id=None, max_date=None,
skip_retweets=False, skip_replies=False,
remove=False, ascending=False):
archive = archive_open(ARCHIVES_DIR, api.me())
statuses = []
delete_statuses = []
print ("Archiving {0} tweets ...".format(api.me().screen_name))
try:
for page in tweepy.Cursor(api.user_timeline, max_id=max_id).pages():
for status in page:
if max_date and pytz.utc.localize(status.created_at) > pytz.utc.localize(max_date):
verbose("Skipped tweet {0} on {1}".format(
status.id_str, status.created_at))
continue
if status.retweeted and skip_retweets:
verbose("Skipped retweet {0} on {1}".format(
status.id_str, status.created_at))
continue
if status.in_reply_to_status_id and skip_replies:
verbose("Skipped a reply tweet {0} on {1}".format(
status.id_str, status.created_at))
continue
if ascending:
statuses.append(status)
else:
archive_add(archive, status)
if remove:
delete_statuses.append(status.id)
        # add the buffered statuses in reverse order (oldest first)
if ascending:
for status in reversed(statuses):
archive_add(archive, status)
archive_close(archive)
if remove:
tweep_delete_all(api, delete_statuses, tweep_delete_tweet)
except tweepy.RateLimitError as e:
raise Exception("Twitter API rate limit reached!", e)
except ValueError as e:
raise Exception("Could not parse status create time!", e)
def tweep_archive_likes(api, max_date=None, remove=False, ascending=False):
archive = archive_open(ARCHIVES_DIR, api.me(), isLikes=True)
likes = []
delete_likes = []
print ("Archiving {0} likes ...".format(api.me().screen_name))
try:
for page in tweepy.Cursor(api.favorites).pages():
for like in page:
if max_date and pytz.utc.localize(like.created_at) > pytz.utc.localize(max_date):
verbose("Skipped like {0} on {1}".format(
like.id_str, like.created_at))
continue
if ascending:
likes.append(like)
else:
archive_add(archive, like, addAuthor=True)
if remove:
delete_likes.append(like.id)
        # add the buffered likes in reverse order (oldest first)
if ascending:
for like in reversed(likes):
archive_add(archive, like, addAuthor=True)
archive_close(archive)
if remove:
tweep_delete_all(api, delete_likes, tweep_delete_like)
except tweepy.RateLimitError as e:
raise Exception("Twitter API rate limit reached!", e)
except ValueError as e:
raise Exception("Could not parse like create time!", e)
def tweep_delete_all(api, posts, func):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 2 # default
print ("Removing {0} entries in {1} parallel threads ...".format(
len(posts), cpus))
pool = ThreadPool(processes=cpus)
for status_id in posts:
pool.apply_async(func, args=(api, status_id,))
pool.close()
pool.join()
def tweep_delete_tweet(api, status_id):
verbose("Deleting status {0}".format(status_id))
try:
api.destroy_status(status_id)
except Exception as e:
print ("[ERROR] {0}".format(e))
def tweep_delete_like(api, like_id):
verbose("Removing like {0}".format(like_id))
try:
api.destroy_favorite(like_id)
except Exception as e:
print ("[ERROR] {0}".format(e))
#############################################################################
# Archive routines
def archive_open(dest_path, user, isLikes=False):
if not os.path.exists(dest_path):
os.mkdir(dest_path)
dir_path = os.path.join(dest_path, strftime("%Y-%m-%d_%H%M"))
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# ePub Stuff
book = epub.EpubBook()
book.set_identifier('id' + str(user.id))
book.set_title(("Tweets by" if not isLikes else "Twitter Likes of") + " @" + user.screen_name)
book.set_language(user.lang or 'en')
book.add_author(user.name or user.screen_name)
book.spine = ['nav']
return {'book': book, 'dest': dir_path,
'filename': LIKES_EPUB if isLikes else TWEETS_EPUB}
def archive_add(archive, status, addAuthor=False):
book = archive['book']
c = epub.EpubHtml(title='Intro', \
file_name='chap_' + str(status.id_str) + '.xhtml', \
lang=status.lang or 'en')
c.content = ''
if addAuthor and status.author:
if sys.version_info[0] >= 3:
screen_name = preprocess('@' + status.author._json['screen_name'])
else:
screen_name = preprocess('@' + status.author._json['screen_name'].encode('utf8'))
c.content = "<h5 align='center'>{0}</h5>".format(screen_name)
c.content += preprocess(status.text)
c.content += '<h6 align="center">' + status.created_at.strftime("%A, %d %b %Y %H:%M") + '</h6>'
book.add_item(c)
book.spine.append(c)
def archive_close(archive):
epub_dest = os.path.join(archive['dest'], archive['filename'])
print ("Saving ePub to {0} ...".format(epub_dest))
# add navigation files
archive['book'].add_item(epub.EpubNcx())
archive['book'].add_item(epub.EpubNav())
epub.write_epub(epub_dest, archive['book'], {})
return epub_dest
#############################################################################
# Config routines
def config_load(config_path):
if not os.path.exists(config_path):
return False
with open(config_path, 'r') as infile:
return json.load(infile)
def config_save(config_path, consumer_key, consumer_secret, token, secret):
data = {'ck': consumer_key, 'cs': consumer_secret, \
't': token, 's': secret}
with open(config_path, 'w') as outfile:
json.dump(data, outfile, indent=2, ensure_ascii=False)
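# The saved config is a small JSON map, e.g.:
#   {"ck": "<consumer key>", "cs": "<consumer secret>",
#    "t": "<access token>", "s": "<access token secret>"}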
def conf_get_parser():
parser = argparse.ArgumentParser(add_help=True,
description="So you're stuck, eh? Here're some hints.")
parser.add_argument('-V', '--version',
help="""prints how old Smeagol is""",
action="store_true", default=False)
parser.add_argument('-v', '--verbose',
help="""verbose (See what's happening)""",
action="store_true", default=False)
parser.add_argument('-id', '--max-id',
help="""archives all statuses with an ID less than
(older than) or equal to the specified""")
parser.add_argument('-l', '--likes',
help="""archives likes only""",
action="store_true", default=False)
parser.add_argument('-dt', '--max-date',
help="""archives all statuses or likes with a post date earlier than
or equal to the specified. Sample format: 2016-11-01 23:00:00+02:00""")
parser.add_argument('-a', '--asc',
help="""adds tweets in ascending date order""",
action="store_true", default=False)
parser.add_argument('-rt', '--no-retweet',
help="""skips retweets""",
action="store_true", default=False)
parser.add_argument('-re', '--no-reply',
help="""skips reply tweets""",
action="store_true", default=False)
parser.add_argument('--remove',
help="""removes all archived tweets.
*** WARNING!!! This action is irreversible! ***""",
action="store_true", default=False)
return parser
#############################################################################
# Misc routines
def verbose(message):
if g_verbose:
print (message)
def get_input(message):
try:
return raw_input(message)
except NameError:
return input(message)
def preprocess(text):
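    # Turn plain tweet text into HTML: bare URLs become <a> links, @mentions
    # link to twitter.com/<user>, #hashtags link to twitter.com/hashtag/<tag>,
    # and the result is wrapped in the PAR_TWEET blockquote template.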
# thx dude! - stackoverflow.com/a/7254397
text = re.sub(r'(?<!"|>)(ht|f)tps?://.*?(?=\s|$)',
r'<a href="\g<0>">\g<0></a>', text)
# thx dude! x2 - gist.github.com/mahmoud/237eb20108b5805aed5f
text = re.sub(r'(?:^|\s)[@]{1}([^\s#<>[\]|{}]+)',
r'<a href="https://twitter.com/\1">@\1</a>', text)
text = re.sub(r'(?:^|\s)[#]{1}(\w+)',
r' <a href="https://twitter.com/hashtag/\1">#\1</a>', text)
return PAR_TWEET.format(text)
def excerpt(text):
text = re.sub(r'@(.*?)\S*', '', text)
return text[0:15] + ' ...'
#############################################################################
# Main
if __name__ == "__main__":
try:
home_dir = os.path.expanduser('~')
config = config_load(os.path.join(home_dir, CONFIG_FILE))
if config and config['t'] and config['s']:
g_auth = tweep_login(config['ck'], config['cs'], config['t'], config['s'])
else:
print ("Please provide your Twitter app access keys\n")
g_consumer_key = get_input("Consumer API Key: ")
g_consumer_secret = get_input("Consumer API Secret: ")
g_auth = tweep_login(g_consumer_key, g_consumer_secret)
config_save(os.path.join(home_dir, CONFIG_FILE), g_consumer_key, \
g_consumer_secret, g_auth.access_token, g_auth.access_token_secret)
g_parser = conf_get_parser()
args = g_parser.parse_args()
g_verbose = args.verbose
g_max_date = None
if args.version:
print ('{} {}'.format(
os.path.basename(__file__).rstrip('.py'), VERSION))
sys.exit(-1)
elif not args.max_id and not args.max_date and not args.likes:
g_parser.print_help()
sys.exit(-1)
elif args.max_date:
g_max_date = dateparser.parse(args.max_date)
verbose("** All entries till: {0}".format(g_max_date))
if args.remove:
print ("** WARNING: Archvied statuses will be removed from your Twitter account!")
if args.likes:
tweep_archive_likes(tweep_getAPI(g_auth),
max_date=g_max_date, ascending=args.asc, remove=args.remove)
else:
tweep_archive_tweets(tweep_getAPI(g_auth), max_id=args.max_id,
max_date=g_max_date, skip_replies=args.no_reply,
skip_retweets=args.no_retweet, ascending=args.asc,
remove=args.remove)
except tweepy.TweepError as e:
traceback.print_exc(file=sys.stdout)
print ("[ERROR] {0}".format(e))
if e.response.status_code == 429:
print ("""The maximum number of requests that are allowed is based on a time interval, some specified period or window of time. The most common request limit interval is fifteen minutes. If an endpoint has a rate limit of 900 requests/15-minutes, then up to 900 requests over any 15-minute interval is allowed.""")
except Exception as e:
traceback.print_exc(file=sys.stdout)
print ("[ERROR] {0}".format(e))
|
|
from __future__ import print_function
import os, sys
import unittest
import copy
import math
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Geometry import rdGeometry as geom
def feq(v1, v2, tol=1.0e-4):
return abs(v1 - v2) < tol
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1aPoint3D(self):
pt = geom.Point3D()
self.assertTrue(feq(pt.x, 0.0))
self.assertTrue(feq(pt.y, 0.0))
self.assertTrue(feq(pt.z, 0.0))
pt = geom.Point3D(3., 4., 5.)
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt.z, 5.0))
self.assertTrue(feq(pt[0], 3.0))
self.assertTrue(feq(pt[1], 4.0))
self.assertTrue(feq(pt[2], 5.0))
self.assertTrue(feq(pt[-3], 3.0))
self.assertTrue(feq(pt[-2], 4.0))
self.assertTrue(feq(pt[-1], 5.0))
lst = list(pt)
self.assertTrue(feq(lst[0], 3.0))
self.assertTrue(feq(lst[1], 4.0))
self.assertTrue(feq(lst[2], 5.0))
pt2 = geom.Point3D(1., 1., 1.)
pt3 = pt + pt2
self.assertTrue(feq(pt3.x, 4.0))
self.assertTrue(feq(pt3.y, 5.0))
self.assertTrue(feq(pt3.z, 6.0))
pt += pt2
self.assertTrue(feq(pt.x, 4.0))
self.assertTrue(feq(pt.y, 5.0))
self.assertTrue(feq(pt.z, 6.0))
pt3 = pt - pt2
self.assertTrue(feq(pt3.x, 3.0))
self.assertTrue(feq(pt3.y, 4.0))
self.assertTrue(feq(pt3.z, 5.0))
pt -= pt2
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt.z, 5.0))
pt *= 2.0
self.assertTrue(feq(pt.x, 6.0))
self.assertTrue(feq(pt.y, 8.0))
self.assertTrue(feq(pt.z, 10.0))
pt /= 2
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt.z, 5.0))
self.assertTrue(feq(pt.Length(), 7.0711))
self.assertTrue(feq(pt.LengthSq(), 50.0))
pt.Normalize()
self.assertTrue(feq(pt.Length(), 1.0))
pt1 = geom.Point3D(1.0, 0.0, 0.0)
pt2 = geom.Point3D(2.0 * math.cos(math.pi / 6), 2.0 * math.sin(math.pi / 6), 0.0)
ang = pt1.AngleTo(pt2)
self.assertTrue(feq(ang, math.pi / 6))
prod = pt1.DotProduct(pt2)
self.assertTrue(feq(prod, 2.0 * math.cos(math.pi / 6)))
pt3 = pt1.CrossProduct(pt2)
self.assertTrue(feq(pt3.x, 0.0))
self.assertTrue(feq(pt3.y, 0.0))
self.assertTrue(feq(pt3.z, 1.0))
def test1bPoint2D(self):
pt = geom.Point2D()
self.assertTrue(feq(pt.x, 0.0))
self.assertTrue(feq(pt.y, 0.0))
pt = geom.Point2D(3., 4.)
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt[0], 3.0))
self.assertTrue(feq(pt[1], 4.0))
self.assertTrue(feq(pt[-2], 3.0))
self.assertTrue(feq(pt[-1], 4.0))
lst = list(pt)
self.assertTrue(feq(lst[0], 3.0))
self.assertTrue(feq(lst[1], 4.0))
pt2 = geom.Point2D(1., 1.)
pt3 = pt + pt2
self.assertTrue(feq(pt3.x, 4.0))
self.assertTrue(feq(pt3.y, 5.0))
pt += pt2
self.assertTrue(feq(pt.x, 4.0))
self.assertTrue(feq(pt.y, 5.0))
pt3 = pt - pt2
self.assertTrue(feq(pt3.x, 3.0))
self.assertTrue(feq(pt3.y, 4.0))
pt -= pt2
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
pt *= 2.0
self.assertTrue(feq(pt.x, 6.0))
self.assertTrue(feq(pt.y, 8.0))
pt /= 2
self.assertTrue(feq(pt.x, 3.0))
self.assertTrue(feq(pt.y, 4.0))
self.assertTrue(feq(pt.Length(), 5.0))
self.assertTrue(feq(pt.LengthSq(), 25.0))
pt.Normalize()
self.assertTrue(feq(pt.Length(), 1.0))
pt1 = geom.Point2D(1.0, 0.0)
pt2 = geom.Point2D(2.0 * math.cos(math.pi / 6), 2.0 * math.sin(math.pi / 6))
ang = pt1.AngleTo(pt2)
self.assertTrue(feq(ang, math.pi / 6))
prod = pt1.DotProduct(pt2)
self.assertTrue(feq(prod, 2.0 * math.cos(math.pi / 6)))
def test1cPointND(self):
dim = 4
pt = geom.PointND(4)
for i in range(dim):
self.assertTrue(feq(pt[i], 0.0))
pt[0] = 3
pt[3] = 4
self.assertTrue(feq(pt[0], 3.0))
self.assertTrue(feq(pt[3], 4.0))
self.assertTrue(feq(pt[-4], 3.0))
self.assertTrue(feq(pt[-1], 4.0))
lst = list(pt)
self.assertTrue(feq(lst[0], 3.0))
self.assertTrue(feq(lst[3], 4.0))
pt2 = geom.PointND(4)
pt2[0] = 1.
pt2[2] = 1.
pt3 = pt + pt2
self.assertTrue(feq(pt3[0], 4.0))
self.assertTrue(feq(pt3[2], 1.0))
self.assertTrue(feq(pt3[3], 4.0))
pt += pt2
self.assertTrue(feq(pt[0], 4.0))
self.assertTrue(feq(pt[2], 1.0))
self.assertTrue(feq(pt[3], 4.0))
pt3 = pt - pt2
self.assertTrue(feq(pt3[0], 3.0))
self.assertTrue(feq(pt3[2], 0.0))
self.assertTrue(feq(pt3[3], 4.0))
pt -= pt2
self.assertTrue(feq(pt[0], 3.0))
self.assertTrue(feq(pt[2], 0.0))
self.assertTrue(feq(pt[3], 4.0))
pt *= 2.0
self.assertTrue(feq(pt[0], 6.0))
self.assertTrue(feq(pt[1], 0.0))
self.assertTrue(feq(pt[2], 0.0))
self.assertTrue(feq(pt[3], 8.0))
pt /= 2
self.assertTrue(feq(pt[0], 3.0))
self.assertTrue(feq(pt[1], 0.0))
self.assertTrue(feq(pt[2], 0.0))
self.assertTrue(feq(pt[3], 4.0))
self.assertTrue(feq(pt.Length(), 5.0))
self.assertTrue(feq(pt.LengthSq(), 25.0))
pt.Normalize()
self.assertTrue(feq(pt.Length(), 1.0))
pkl = cPickle.dumps(pt)
pt2 = cPickle.loads(pkl)
self.assertTrue(len(pt) == len(pt2))
for i in range(len(pt)):
self.assertTrue(feq(pt2[i], pt[i]))
def test3UniformGrid(self):
ugrid = geom.UniformGrid3D(20, 18, 15)
self.assertTrue(ugrid.GetNumX() == 40)
self.assertTrue(ugrid.GetNumY() == 36)
self.assertTrue(ugrid.GetNumZ() == 30)
dvect = ugrid.GetOccupancyVect()
ugrid = geom.UniformGrid3D(20, 18, 15, 0.5, DataStructs.DiscreteValueType.TWOBITVALUE)
dvect = ugrid.GetOccupancyVect()
self.assertTrue(dvect.GetValueType() == DataStructs.DiscreteValueType.TWOBITVALUE)
grd = geom.UniformGrid3D(10.0, 10.0, 10.0, 0.5)
grd.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(2.0, 2.0, 0.0), 1.5, 0.25)
geom.WriteGridToFile(grd, "junk.grd")
grd2 = geom.UniformGrid3D(10.0, 10.0, 10.0, 0.5)
grd2.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.5, 0.25)
grd2.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.5, 0.25)
grd2.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.5, 0.25)
dist = geom.TanimotoDistance(grd, grd2)
self.assertTrue(dist == 0.25)
dist = geom.ProtrudeDistance(grd, grd2)
self.assertTrue(dist == 0.25)
dist = geom.ProtrudeDistance(grd2, grd)
self.assertTrue(dist == 0.0)
grd2 = geom.UniformGrid3D(10.0, 10.0, 10.0, 0.5, DataStructs.DiscreteValueType.FOURBITVALUE)
grd2.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.5, 0.25, 3)
grd2.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.5, 0.25, 3)
grd2.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.5, 0.25, 3)
self.assertRaises(ValueError, lambda: geom.TanimotoDistance(grd, grd2))
grd2 = geom.UniformGrid3D(10.0, 10.0, 10.0, 1.0)
self.assertRaises(ValueError, lambda: geom.TanimotoDistance(grd, grd2))
grd2 = geom.UniformGrid3D(11.0, 10.0, 10.0, 1.0)
self.assertRaises(ValueError, lambda: geom.TanimotoDistance(grd, grd2))
def testSymmetry(self):
grd = geom.UniformGrid3D(10.0, 10.0, 10.0, 0.5)
grd.SetSphereOccupancy(geom.Point3D(-2.2, -2.0, 0.0), 1.65, 0.25)
grd.SetSphereOccupancy(geom.Point3D(2.2, -2.0, 0.0), 1.65, 0.25)
bPt1 = geom.Point3D(-4.0, -2.0, -2.0)
bPt2 = geom.Point3D(4.0, -2.0, -2.0)
for k in range(8):
bPt1 += geom.Point3D(0.0, 0.0, 0.5)
bPt2 += geom.Point3D(0.0, 0.0, 0.5)
for j in range(8):
bPt1 += geom.Point3D(0.0, 0.5, 0.0)
bPt2 += geom.Point3D(0.0, 0.5, 0.0)
for i in range(8):
bPt1 += geom.Point3D(0.5, 0.0, 0.0)
bPt2 -= geom.Point3D(0.5, 0.0, 0.0)
self.assertTrue(grd.GetValPoint(bPt1) == grd.GetValPoint(bPt2))
bPt1.x = -4.0
bPt2.x = 4.0
bPt1.y = -2.0
bPt2.y = -2.0
def testPointPickles(self):
pt = geom.Point3D(2.0, -3.0, 1.0)
pt2 = cPickle.loads(cPickle.dumps(pt))
self.assertTrue(feq(pt.x, pt2.x, 1e-6))
self.assertTrue(feq(pt.y, pt2.y, 1e-6))
self.assertTrue(feq(pt.z, pt2.z, 1e-6))
pt = geom.Point2D(2.0, -4.0)
pt2 = cPickle.loads(cPickle.dumps(pt))
self.assertTrue(feq(pt.x, pt2.x, 1e-6))
self.assertTrue(feq(pt.y, pt2.y, 1e-6))
def test4GridPickles(self):
grd = geom.UniformGrid3D(10.0, 9.0, 8.0, 0.5)
self.assertTrue(grd.GetNumX() == 20)
self.assertTrue(grd.GetNumY() == 18)
self.assertTrue(grd.GetNumZ() == 16)
grd.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.5, 0.25)
grd.SetSphereOccupancy(geom.Point3D(2.0, 2.0, 0.0), 1.5, 0.25)
self.assertTrue(geom.TanimotoDistance(grd, grd) == 0.0)
grd2 = cPickle.loads(cPickle.dumps(grd))
self.assertTrue(grd2.GetNumX() == 20)
self.assertTrue(grd2.GetNumY() == 18)
self.assertTrue(grd2.GetNumZ() == 16)
self.assertTrue(geom.TanimotoDistance(grd, grd2) == 0.0)
def test5GridOps(self):
grd = geom.UniformGrid3D(10, 10, 10)
grd.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.0, 0.25)
grd.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.0, 0.25)
grd2 = geom.UniformGrid3D(10, 10, 10)
grd2.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.0, 0.25)
grd2.SetSphereOccupancy(geom.Point3D(2.0, 2.0, 0.0), 1.0, 0.25)
self.assertTrue(geom.TanimotoDistance(grd, grd) == 0.0)
self.assertTrue(geom.TanimotoDistance(grd, grd2) == 1.0)
grd3 = copy.deepcopy(grd)
grd3 |= grd2
self.assertTrue(geom.TanimotoDistance(grd3, grd) == .5)
self.assertTrue(geom.TanimotoDistance(grd3, grd2) == .5)
grd3 = copy.deepcopy(grd)
grd3 += grd2
self.assertTrue(geom.TanimotoDistance(grd3, grd) == .5)
self.assertTrue(geom.TanimotoDistance(grd3, grd2) == .5)
grd3 -= grd
self.assertTrue(geom.TanimotoDistance(grd3, grd) == 1.0)
self.assertTrue(geom.TanimotoDistance(grd3, grd2) == 0)
grd4 = geom.UniformGrid3D(10, 10, 10)
grd4.SetSphereOccupancy(geom.Point3D(-2.0, -2.0, 0.0), 1.0, 0.25)
grd4.SetSphereOccupancy(geom.Point3D(-2.0, 2.0, 0.0), 1.0, 0.25)
grd4.SetSphereOccupancy(geom.Point3D(2.0, -2.0, 0.0), 1.0, 0.25)
self.assertTrue(feq(geom.TanimotoDistance(grd4, grd), .3333))
self.assertTrue(feq(geom.TanimotoDistance(grd4, grd2), .75))
grd4 &= grd2
self.assertTrue(feq(geom.TanimotoDistance(grd4, grd), 1.0))
self.assertTrue(feq(geom.TanimotoDistance(grd4, grd2), .5))
def test6Dihedrals(self):
p1 = geom.Point3D(1, 0, 0)
p2 = geom.Point3D(0, 0, 0)
p3 = geom.Point3D(0, 1, 0)
p4 = geom.Point3D(.5, 1, .5)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 4, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, -math.pi / 4, 4)
p4 = geom.Point3D(-.5, 1, .5)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, 3 * math.pi / 4, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, -3 * math.pi / 4, 4)
p4 = geom.Point3D(.5, 1, -.5)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 4, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 4, 4)
p4 = geom.Point3D(-.5, 1, -.5)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, 3 * math.pi / 4, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, 3 * math.pi / 4, 4)
p4 = geom.Point3D(0, 1, 1)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 2, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, -math.pi / 2, 4)
p4 = geom.Point3D(0, 1, -1)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 2, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi / 2, 4)
p4 = geom.Point3D(1, 1, 0)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, 0, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, 0, 4)
p4 = geom.Point3D(-1, 1, 0)
ang = geom.ComputeDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi, 4)
ang = geom.ComputeSignedDihedralAngle(p1, p2, p3, p4)
self.assertAlmostEqual(ang, math.pi, 4)
def test7UniformGridIndices(self):
ugrid = geom.UniformGrid3D(20, 18, 15)
idx = ugrid.GetGridIndex(3, 2, 1)
xi, yi, zi = ugrid.GetGridIndices(idx)
self.assertEqual(xi, 3)
self.assertEqual(yi, 2)
self.assertEqual(zi, 1)
if __name__ == '__main__':
print("Testing Geometry wrapper")
unittest.main()
|
|
"""Support for RainMachine devices."""
from __future__ import annotations
import asyncio
from datetime import timedelta
from functools import partial
from typing import Any, cast
from regenmaschine import Client
from regenmaschine.controller import Controller
from regenmaschine.errors import RainMachineError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
entity_registry as er,
)
from homeassistant.helpers.entity import DeviceInfo, EntityDescription
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util.network import is_ip_address
from .config_flow import get_client_controller
from .const import (
CONF_ZONE_RUN_TIME,
DATA_CONTROLLER,
DATA_COORDINATOR,
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
DOMAIN,
LOGGER,
)
DEFAULT_ATTRIBUTION = "Data provided by Green Electronics LLC"
DEFAULT_ICON = "mdi:water"
DEFAULT_SSL = True
DEFAULT_UPDATE_INTERVAL = timedelta(seconds=15)
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR, Platform.SWITCH]
UPDATE_INTERVALS = {
DATA_PROVISION_SETTINGS: timedelta(minutes=1),
DATA_PROGRAMS: timedelta(seconds=30),
DATA_RESTRICTIONS_CURRENT: timedelta(minutes=1),
DATA_RESTRICTIONS_UNIVERSAL: timedelta(minutes=1),
DATA_ZONES: timedelta(seconds=15),
}
# Constants expected by the RainMachine API for Service Data
CONF_CONDITION = "condition"
CONF_DEWPOINT = "dewpoint"
CONF_ET = "et"
CONF_MAXRH = "maxrh"
CONF_MAXTEMP = "maxtemp"
CONF_MINRH = "minrh"
CONF_MINTEMP = "mintemp"
CONF_PRESSURE = "pressure"
CONF_QPF = "qpf"
CONF_RAIN = "rain"
CONF_SECONDS = "seconds"
CONF_SOLARRAD = "solarrad"
CONF_TEMPERATURE = "temperature"
CONF_TIMESTAMP = "timestamp"
CONF_WEATHER = "weather"
CONF_WIND = "wind"
# Config Validators for Weather Service Data
CV_WX_DATA_VALID_PERCENTAGE = vol.All(vol.Coerce(int), vol.Range(min=0, max=100))
CV_WX_DATA_VALID_TEMP_RANGE = vol.All(vol.Coerce(float), vol.Range(min=-40.0, max=40.0))
CV_WX_DATA_VALID_RAIN_RANGE = vol.All(vol.Coerce(float), vol.Range(min=0.0, max=1000.0))
CV_WX_DATA_VALID_WIND_SPEED = vol.All(vol.Coerce(float), vol.Range(min=0.0, max=65.0))
CV_WX_DATA_VALID_PRESSURE = vol.All(vol.Coerce(float), vol.Range(min=60.0, max=110.0))
CV_WX_DATA_VALID_SOLARRAD = vol.All(vol.Coerce(float), vol.Range(min=0.0, max=5.0))
SERVICE_NAME_PAUSE_WATERING = "pause_watering"
SERVICE_NAME_PUSH_WEATHER_DATA = "push_weather_data"
SERVICE_NAME_STOP_ALL = "stop_all"
SERVICE_NAME_UNPAUSE_WATERING = "unpause_watering"
SERVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_DEVICE_ID): cv.string,
}
)
SERVICE_PAUSE_WATERING_SCHEMA = SERVICE_SCHEMA.extend(
{
vol.Required(CONF_SECONDS): cv.positive_int,
}
)
SERVICE_PUSH_WEATHER_DATA_SCHEMA = SERVICE_SCHEMA.extend(
{
vol.Optional(CONF_TIMESTAMP): cv.positive_float,
vol.Optional(CONF_MINTEMP): CV_WX_DATA_VALID_TEMP_RANGE,
vol.Optional(CONF_MAXTEMP): CV_WX_DATA_VALID_TEMP_RANGE,
vol.Optional(CONF_TEMPERATURE): CV_WX_DATA_VALID_TEMP_RANGE,
vol.Optional(CONF_WIND): CV_WX_DATA_VALID_WIND_SPEED,
vol.Optional(CONF_SOLARRAD): CV_WX_DATA_VALID_SOLARRAD,
vol.Optional(CONF_QPF): CV_WX_DATA_VALID_RAIN_RANGE,
vol.Optional(CONF_RAIN): CV_WX_DATA_VALID_RAIN_RANGE,
vol.Optional(CONF_ET): CV_WX_DATA_VALID_RAIN_RANGE,
vol.Optional(CONF_MINRH): CV_WX_DATA_VALID_PERCENTAGE,
vol.Optional(CONF_MAXRH): CV_WX_DATA_VALID_PERCENTAGE,
vol.Optional(CONF_CONDITION): cv.string,
vol.Optional(CONF_PRESSURE): CV_WX_DATA_VALID_PRESSURE,
vol.Optional(CONF_DEWPOINT): CV_WX_DATA_VALID_TEMP_RANGE,
}
)
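# Illustrative sketch only (not part of the integration): a hypothetical helper showing
# how a push_weather_data payload is validated by the voluptuous schema above. The
# device ID string below is a made-up placeholder.
def _example_push_weather_data_payload() -> dict[str, Any]:
    """Return a sample service payload validated by SERVICE_PUSH_WEATHER_DATA_SCHEMA."""
    return SERVICE_PUSH_WEATHER_DATA_SCHEMA(
        {
            CONF_DEVICE_ID: "abc123",  # hypothetical device registry ID
            CONF_TEMPERATURE: 21.5,
            CONF_MINRH: 40,
            CONF_MAXRH: 80,
        }
    )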
@callback
def async_get_controller_for_service_call(
hass: HomeAssistant, call: ServiceCall
) -> Controller:
"""Get the controller related to a service call (by device ID)."""
device_id = call.data[CONF_DEVICE_ID]
device_registry = dr.async_get(hass)
if device_entry := device_registry.async_get(device_id):
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.entry_id in device_entry.config_entries:
return cast(
Controller, hass.data[DOMAIN][entry.entry_id][DATA_CONTROLLER]
)
raise ValueError(f"No controller for device ID: {device_id}")
async def async_update_programs_and_zones(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Update program and zone DataUpdateCoordinators.
Program and zone updates always go together because of how linked they are:
programs affect zones and certain combinations of zones affect programs.
"""
await asyncio.gather(
*[
hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR][
DATA_PROGRAMS
].async_refresh(),
hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR][
DATA_ZONES
].async_refresh(),
]
)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up RainMachine as config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(session=websession)
try:
await client.load_local(
entry.data[CONF_IP_ADDRESS],
entry.data[CONF_PASSWORD],
port=entry.data[CONF_PORT],
ssl=entry.data.get(CONF_SSL, DEFAULT_SSL),
)
except RainMachineError as err:
raise ConfigEntryNotReady from err
# regenmaschine can load multiple controllers at once, but we only grab the one
# we loaded above:
controller = get_client_controller(client)
entry_updates: dict[str, Any] = {}
if not entry.unique_id or is_ip_address(entry.unique_id):
# If the config entry doesn't already have a unique ID (or it's an IP address), set one:
entry_updates["unique_id"] = controller.mac
if CONF_ZONE_RUN_TIME in entry.data:
# If a zone run time exists in the config entry's data, pop it and move it to
# options:
data = {**entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**entry.options,
CONF_ZONE_RUN_TIME: data.pop(CONF_ZONE_RUN_TIME),
}
if entry_updates:
hass.config_entries.async_update_entry(entry, **entry_updates)
async def async_update(api_category: str) -> dict:
"""Update the appropriate API data based on a category."""
data: dict = {}
try:
if api_category == DATA_PROGRAMS:
data = await controller.programs.all(include_inactive=True)
elif api_category == DATA_PROVISION_SETTINGS:
data = await controller.provisioning.settings()
elif api_category == DATA_RESTRICTIONS_CURRENT:
data = await controller.restrictions.current()
elif api_category == DATA_RESTRICTIONS_UNIVERSAL:
data = await controller.restrictions.universal()
else:
data = await controller.zones.all(details=True, include_inactive=True)
except RainMachineError as err:
raise UpdateFailed(err) from err
return data
controller_init_tasks = []
coordinators = {}
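# Create one DataUpdateCoordinator per API category so each dataset refreshes on its
# own interval (see UPDATE_INTERVALS above).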
for api_category in (
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
):
coordinator = coordinators[api_category] = DataUpdateCoordinator(
hass,
LOGGER,
name=f'{controller.name} ("{api_category}")',
update_interval=UPDATE_INTERVALS[api_category],
update_method=partial(async_update, api_category),
)
controller_init_tasks.append(coordinator.async_refresh())
await asyncio.gather(*controller_init_tasks)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_CONTROLLER: controller,
DATA_COORDINATOR: coordinators,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
async def async_pause_watering(call: ServiceCall) -> None:
"""Pause watering for a set number of seconds."""
controller = async_get_controller_for_service_call(hass, call)
await controller.watering.pause_all(call.data[CONF_SECONDS])
await async_update_programs_and_zones(hass, entry)
async def async_push_weather_data(call: ServiceCall) -> None:
"""Push weather data to the device."""
controller = async_get_controller_for_service_call(hass, call)
await controller.parsers.post_data(
{
CONF_WEATHER: [
{
key: value
for key, value in call.data.items()
if key != CONF_DEVICE_ID
}
]
}
)
async def async_stop_all(call: ServiceCall) -> None:
"""Stop all watering."""
controller = async_get_controller_for_service_call(hass, call)
await controller.watering.stop_all()
await async_update_programs_and_zones(hass, entry)
async def async_unpause_watering(call: ServiceCall) -> None:
"""Unpause watering."""
controller = async_get_controller_for_service_call(hass, call)
await controller.watering.unpause_all()
await async_update_programs_and_zones(hass, entry)
for service_name, schema, method in (
(
SERVICE_NAME_PAUSE_WATERING,
SERVICE_PAUSE_WATERING_SCHEMA,
async_pause_watering,
),
(
SERVICE_NAME_PUSH_WEATHER_DATA,
SERVICE_PUSH_WEATHER_DATA_SCHEMA,
async_push_weather_data,
),
(SERVICE_NAME_STOP_ALL, SERVICE_SCHEMA, async_stop_all),
(SERVICE_NAME_UNPAUSE_WATERING, SERVICE_SCHEMA, async_unpause_watering),
):
if hass.services.has_service(DOMAIN, service_name):
continue
hass.services.async_register(DOMAIN, service_name, method, schema=schema)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an RainMachine config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
loaded_entries = [
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.state == ConfigEntryState.LOADED
]
if len(loaded_entries) == 1:
# If this is the last loaded instance of RainMachine, deregister any services
# defined during integration setup:
for service_name in (
SERVICE_NAME_PAUSE_WATERING,
SERVICE_NAME_PUSH_WEATHER_DATA,
SERVICE_NAME_STOP_ALL,
SERVICE_NAME_UNPAUSE_WATERING,
):
hass.services.async_remove(DOMAIN, service_name)
return unload_ok
async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Migrate an old config entry."""
version = entry.version
LOGGER.debug("Migrating from version %s", version)
# 1 -> 2: Update unique IDs to be consistent across platforms (including restoring
# the colons that were originally stripped from the MAC address):
if version == 1:
version = entry.version = 2
@callback
def migrate_unique_id(entity_entry: er.RegistryEntry) -> dict[str, Any]:
"""Migrate the unique ID to a new format."""
unique_id_pieces = entity_entry.unique_id.split("_")
old_mac = unique_id_pieces[0]
new_mac = ":".join(old_mac[i : i + 2] for i in range(0, len(old_mac), 2))
unique_id_pieces[0] = new_mac
if entity_entry.entity_id.startswith("switch"):
unique_id_pieces[1] = unique_id_pieces[1][11:].lower()
return {"new_unique_id": "_".join(unique_id_pieces)}
await er.async_migrate_entries(hass, entry.entry_id, migrate_unique_id)
LOGGER.info("Migration to version %s successful", version)
return True
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle an options update."""
await hass.config_entries.async_reload(entry.entry_id)
class RainMachineEntity(CoordinatorEntity):
"""Define a generic RainMachine entity."""
def __init__(
self,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator,
controller: Controller,
description: EntityDescription,
) -> None:
"""Initialize."""
super().__init__(coordinator)
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, controller.mac)},
configuration_url=f"https://{entry.data[CONF_IP_ADDRESS]}:{entry.data[CONF_PORT]}",
connections={(dr.CONNECTION_NETWORK_MAC, controller.mac)},
name=str(controller.name),
manufacturer="RainMachine",
model=(
f"Version {controller.hardware_version} "
f"(API: {controller.api_version})"
),
sw_version=controller.software_version,
)
self._attr_extra_state_attributes = {}
self._attr_name = f"{controller.name} {description.name}"
self._attr_unique_id = f"{controller.mac}_{description.key}"
self._controller = controller
self.entity_description = description
@callback
def _handle_coordinator_update(self) -> None:
"""Respond to a DataUpdateCoordinator update."""
self.update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self.update_from_latest_data()
@callback
def update_from_latest_data(self) -> None:
"""Update the state."""
raise NotImplementedError
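# Illustrative sketch only: a hypothetical subclass showing how platform entities are
# expected to implement update_from_latest_data() against coordinator.data. The zone
# payload shape (a dict keyed by zone uid with a "zoneName" field) is an assumption,
# not a guarantee of the RainMachine API.
class _ExampleZoneNameEntity(RainMachineEntity):
    """Hypothetical entity mirroring a zone's name as an extra state attribute."""

    @callback
    def update_from_latest_data(self) -> None:
        """Update the state from the zones coordinator data."""
        zone = self.coordinator.data.get(1, {})
        self._attr_extra_state_attributes["zone_name"] = zone.get("zoneName")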
|
|
##
# \namespace cross3d.studiomax.matlib
#
# \remarks This package contains material editing methods for Studiomax
#
# \author eric
# \author Blur Studio
# \date 01/06/11
#
from Py3dsMax import mxs
def buildMaterialFrom( material, opacityMap = None, displacementMap = None, bumpMap = None ):
"""
\remarks creates a new material using the properties of the input material as its base,
applying the input opacity, displacement, and bump map overrides
\param material <Py3dsMax.mxs.Material>
\param opacityMap <Py3dsMax.mxs.Map>
\param displacementMap <Py3dsMax.mxs.Map>
\param bumpMap <Py3dsMax.mxs.Map>
\return <Py3dsMax.mxs.Material> builtMaterial
"""
# if there is no opacity, displacement, or bump map, then there is no need to modify the input material
if ( not (opacityMap or displacementMap or bumpMap) ):
return material
# store useful methods
mcopy = mxs.copy
class_of = mxs.classof
is_prop = mxs.isproperty
cls = class_of( material )
#----------------------------------------
# GK 02/05/10 if texture is nested in a "Displacement 3D" or "Height Map" texture, get the root map
# and use it in the material's own displacement slot. (trying to maintain some compatibility between vray and mental ray here.)
# if not nested, we must convert to mr connection displacement and put it there anyway since max's displacement spinner
# does not correlate correctly to mental ray displacement amounts.
displacementTexture = None
# extract from a Displacement_3d texture
if ( class_of( displacementMap ) == mxs.Displacement_3D__3dsmax ):
displacementTexture = displacementMap
displacementMap = displacementMap.map
# extract from a Height_Map texture
elif ( class_of( displacementMap ) == mxs.Height_Map_Displacement__3dsmax ):
displacementTexture = displacementMap
displacementMap = displacementMap.heightMap
#----------------------------------------
# build a matte shadow reflection material
if ( cls in (mxs.Matte_Shadow_Reflection__mi,mxs.mr_Matte_Shadow_Reflection_Mtl) ):
matteMtl = mcopy( material )
matteMtl.opacity_shader = opacityMap
output = mxs.mental_ray()
output.surface = mxs.Material_to_Shader()
output.surface.material = matteMtl
if ( not displacementTexture ):
	displacementTexture = mxs.Displacement_3D__3dsmax()
	displacementTexture.map = displacementMap
output.displacement = displacementTexture
output.bump = bumpMap
return output
# build a standard material
elif ( cls == mxs.StandardMaterial ):
output = mcopy( material )
# use the opacity map
if ( opacityMap ):
output.opacityMap = opacityMap
# use the bump map
if ( bumpMap ):
output.bumpMap = bumpMap
output.bumpMapAmount = 100
# use the displacement map
if ( displacementMap ):
output.displacementMap = displacementMap
output.displacementMapEnable = True
if ( is_prop( output, 'mental_ray__material_custom_attribute' ) ):
if ( not displacementTexture ):
displacementTexture = mxs.Displacement_3D__3dsmax()
displacementTexture.map = displacementMap
output.mental_ray__material_custom_attribute.displacement = displacementTexture
output.mental_ray__material_custom_attribute.displacementLocked = False
return output
# build a Vray material
elif ( cls == mxs.VrayMtl ):
output = mcopy( material )
# use the bump map
if ( bumpMap ):
output.texmap_bump = bumpMap
output.texmap_bump_on = True
output.texmap_bump_multiplier = 100
# use the opacity map
if ( opacityMap ):
output.texmap_opacity = opacityMap
output.texmap_opacity_on = True
output.texmap_opacity_multiplier = 100
# use the displacementmap
if ( displacementMap ):
output.texmap_displacement = displacementMap
output.texmap_displacement_on = True
output.texmap_displacement_multiplier = 100
return output
# build a Vray Light material
elif ( cls == mxs.VrayLightMtl ):
# light materials only need opacity maps
if ( not opacityMap ):
return material
output = mcopy( material )
output.opacity_texmap = opacityMap
output.opacity_texmap_on = True
return output
# build a Arch_Design material
elif ( cls == mxs.Arch___Design__mi ):
output = mcopy( material )
output.cutout_map = opacityMap
# displace the texture
if ( not displacementTexture ):
	output.displacementMap = displacementMap
# displace the property
elif ( is_prop( material, 'mental_ray__material_custom_attribute' ) ):
	output.mental_ray__material_custom_attribute.displacement = displacementTexture
	output.mental_ray__material_custom_attribute.displacementLocked = False
# use the bump map
if ( bumpMap ):
	output.bump_map = bumpMap
	output.bump_map_amt = 1.0
return output
# build a blend material
elif ( cls == mxs.Blend ):
if ( displacementMap and is_prop( material, 'mental_ray__material_custom_attribute' ) ):
output = mcopy( material )
# create a displacement texture
if ( not displacementTexture ):
displacementTexture = mxs.Displacement_3D__3dsmax()
displacementTexture.map = displacementMap
output.displace = displacementTexture
return output
return material
# build a fast skin shader
elif ( cls in (mxs.SSS_Fast_Skin_Material_Displace__mi,mxs.SSS_Fast_Skin___w__Disp___mi) ):
if ( displacementMap ):
output = mcopy( material )
# use the bump map
if ( bumpMap ):
if ( mxs.classof( bumpMap ) != mxs.Bump__3dsmax ):
	bumpTexture = mxs.Bump__3dsmax()
bumpTexture.map = bumpMap
output.bump = bumpTexture
else:
output.bump = bumpMap
# use the displacement texture
if ( not displacementTexture ):
displacementTexture = mxs.Displacement_3D__3dsmax()
displacementTexture.map = displacementMap
output.displace = displacementTexture
return output
return material
# build a mental_ray shader
elif ( cls == mxs.Mental_Ray ):
output = mcopy( material )
# use displacement
if ( displacementMap ):
if ( not displacementTexture ):
displacementTexture = mxs.Displacement_3D__3dsmax()
displacementTexture.map = displacementMap
output.displacement = displacementTexture
# use opacity
if ( opacityMap ):
opacityMtl = mxs.Opacity__base()
opacityMtl.input_shader = material.surface
opacityMtl.opacity_shader = opacityMap
output.surface = opacityMtl
return output
# build a multi/material
elif ( cls == mxs.MultiMaterial ):
output = mcopy( material )
count = material.numsubs
output.numsubs = count
for i in range(count):
output[i] = buildMaterialFrom( material[i], opacityMap = opacityMap, displacementMap = displacementMap, bumpMap = bumpMap )
return output
# create a default material
else:
count = mxs.getNumSubMtls( material )
if ( count ):
output = mcopy( material )
get_submtl = mxs.getSubMtl
set_submtl = mxs.setSubMtl
for i in range( count ):
set_submtl( output, i + 1, buildMaterialFrom( get_submtl( material, i + 1 ), opacityMap = opacityMap, displacementMap = displacementMap, bumpMap = bumpMap ) )
return output
return material
def createMaterialOverride( baseMaterial, overrideMaterial, options = None, advancedState = None ):
"""
\remarks generate a proper override material based on the input base material, preserving aspects of the
base material according to the supplied options while taking the main shader aspects from the override material
\param baseMaterial <Py3dsMax.mxs.Material>
\param overrideMaterial <Py3dsMax.mxs.Material>
\param options <cross3d.constants.MaterialOverrideOptions>
\param advancedState <dict> { <int> baseMaterialId: ( <blur3d.gui.SceneMaterial> override, <bool> ignored ) }
"""
from cross3d.constants import MaterialOverrideOptions
# use default options when none are supplied
if ( options == None ):
options = MaterialOverrideOptions.All
# make sure we have at least some overriding options or an advanced state to work from
if ( not (options or advancedState) ):
return overrideMaterial
# make sure we have an overrideMaterial to work from
if ( not (overrideMaterial and baseMaterial) ):
return overrideMaterial
# store maxscript values that we use more than once (faster)
is_kindof = mxs.isKindOf
multi_material = mxs.MultiMaterial
class_of = mxs.classOf
get_submtl = mxs.getSubMtl
set_submtl = mxs.setSubMtl
get_numsubmtls = mxs.getNumSubMtls
# process XRef materials
if ( is_kindof( baseMaterial, mxs.XRef_Material ) ):
return createMaterialOverride( (baseMaterial.getSourceMaterial(True)), overrideMaterial, options, advancedState )
# process Multi/Sub Materials
elif ( is_kindof( baseMaterial, multi_material ) ):
outputMaterial = multi_material()
count = baseMaterial.numsubs
outputMaterial.numsubs = count
for i in range( count ):
# determine the actual overriding material based on if the override material is a multi/sub or not
if ( is_kindof( overrideMaterial, multi_material ) ):
replaceMaterial = overrideMaterial[i]
else:
replaceMaterial = overrideMaterial
subMaterial = baseMaterial[i]
# check to see if this is a mental ray holder material
if ( class_of( subMaterial ) == mxs.Mental_Ray and not subMaterial.surface ):
outputMaterial[i] = subMaterial
else:
outputMaterial[i] = createMaterialOverride( subMaterial, replaceMaterial, options = options, advancedState = advancedState )
return outputMaterial
# GK 09/24/10 standard behavior for alt materials is to apply alternates within blend materials (i.e. multimaterials) instead of replacing them entirely.
# this is so that the user has control over specific parts of the material (like with Multi-Subs). this works for pretty much every situation,
# however because we are now using VrayBlendMtls which are unsupported by renderers other than Vray, this method can create a situation where you're trying to render
# a scanline alt material with scanline, but it is nested within a VrayBlendMtl so it renders incorrectly. also, VrayBlendMtls do not support standard materials anyway
# so even rendering with Vray will not behave correctly. below is code to handle this situation:
elif ( class_of( overrideMaterial ) in (mxs.StandardMaterial,mxs.MatteShadow,mxs.Blur_Matte_mtl) ):
if ( class_of( baseMaterial ) in (mxs.VrayBlendMtl,mxs.VrayOverrideMtl,mxs.VrayMtlWrapper) ):
return createMaterialOverride( get_submtl( baseMaterial, 1 ), overrideMaterial, options = options, advancedState = advancedState )
# process any non-multi/sub multi-materials
elif ( get_numsubmtls( baseMaterial ) ):
outputMaterial = mxs.copy( baseMaterial )
count = get_numsubmtls( baseMaterial )
for i in range( count ):
if ( is_kindof( overrideMaterial, multi_material ) ):
replaceMaterial = overrideMaterial[i]
else:
replaceMaterial = overrideMaterial
subMaterial = get_submtl( baseMaterial, i + 1 )
if ( subMaterial ):
if ( class_of( subMaterial ) == mxs.Mental_Ray and not subMaterial.surface ):
set_submtl( outputMaterial, i+1, subMaterial )
else:
set_submtl( outputMaterial, i+1, createMaterialOverride( subMaterial, replaceMaterial, options = options, advancedState = advancedState ) )
return outputMaterial
# process all other materials
if ( options & MaterialOverrideOptions.KeepOpacity ):
opacityMap = findOpacityMap( baseMaterial )
else:
opacityMap = None
if ( options & MaterialOverrideOptions.KeepDisplacement ):
displMap = findDisplacementMap( baseMaterial )
bumpMap = findBumpMap( baseMaterial )
else:
displMap = None
bumpMap = None
# check to see if we should use an advanced state
baseMaterialId = mxs.blurUtil.uniqueId(baseMaterial)
if ( advancedState and baseMaterialId in advancedState ):
overrideSceneMaterial, ignoreOverride = advancedState[baseMaterialId]
# ignore this material from the advanced state
if ( ignoreOverride ):
return baseMaterial
# pull the maxscript value from the wrapper instance
elif ( overrideSceneMaterial ):
overrideMaterial = overrideSceneMaterial.nativePointer()
# return the new material
return buildMaterialFrom( overrideMaterial, opacityMap = opacityMap, displacementMap = displMap, bumpMap = bumpMap )
def findBumpMap( material ):
"""
\remarks looks for the bump map of the input material based on its type
\param material <Py3dsMax.mxs.Material>
\return <Py3dsMax.mxs.Map> bumpMap || None
"""
cls = mxs.classof( material )
# return a standard material's bump map
if ( cls == mxs.StandardMaterial ):
if ( material.bumpMapEnable ):
bumpmap = material.bumpMap
if ( bumpmap and material.bumpMapAmount != 100 ):
bumpTexture = mxs.Output()
bumpTexture.map1 = bumpmap
bumpTexture.output.bump_amount = ( material.bumpMapAmount / 100.0 )
return bumpTexture
return bumpmap
return None
# return a vray bump map
if ( cls in (mxs.VrayMtl,mxs.VrayFastSSS2, mxs.VRaySkinMtl) ):
if ( material.texmap_bump_on ):
bumpmap = material.texmap_bump
if ( bumpmap and material.texmap_bump_multiplier != 100 ):
bumpTexture = mxs.Output()
bumpTexture.map1 = bumpmap
bumpTexture.output.bump_amount = ( material.texmap_bump_multiplier / 100.0 )
return bumpTexture
return bumpmap
return None
# return a matte bump
if ( cls in (mxs.Matte_Shadow_Reflection__mi,mxs.mr_Matte_Shadow_Reflection_Mtl) ):
return material.bump
# return an arch-design material
if ( cls == mxs.Arch___Design__mi ):
if ( material.bump_map_on ):
bumpmap = material.bump_map
if ( bumpmap and material.bump_map_amt != 1.0 ):
bumpTexture = mxs.Output()
bumpTexture.map1 = bumpmap
bumpTexture.output.bump_amount = material.bump_map_amt
return bumpTexture
return bumpmap
return None
# return a skin bump map
if ( cls in (mxs.SSS_Fast_Skin___w__Disp___mi,mxs.SSS_Fast_Skin___mi,mxs.SSS_Fast_Skin_Material_Displace__mi,mxs.SSS_Fast_Skin_Material__mi,mxs.SSS_Fast_Material__mi) ):
if ( mxs.classof(material.bump) == mxs.Bump__3dsmax ):
bumpmap = material.bump.map
if ( bumpmap ):
bumpTexture = mxs.Output()
bumpTexture.map1 = bumpmap
bumpTexture.output.bump_amount = material.bump.multiplier
return bumpTexture
return None
else:
return material.bump
# no bump map found
return None
def findDisplacementMap( material ):
"""
\remarks looks for the displacement map of the input material based on its type
\param material <Py3dsMax.mxs.Material>
\return <Py3dsMax.mxs.Map> displacementMap || None
"""
cls = mxs.classof( material )
is_prop = mxs.isproperty
# return a standard material's displacement map
if ( cls == mxs.StandardMaterial ):
if ( material.displacementMap and material.displacementMapEnable ):
return material.displacementMap
elif ( is_prop( material, 'mental_ray__material_custom_attribute' ) ):
mrattr = material.mental_ray__material_custom_attribute
if ( mrattr.displacementOn and not mrattr.displacementLocked ):
return mrattr.displacement
return None
# return a vray material's displacement map
elif ( cls in (mxs.VrayMtl, mxs.VRayFastSSS2, mxs.VRaySkinMtl) ):
if material.texmap_displacement_on:
return material.texmap_displacement
else:
return None
# return an arch design's material
elif ( cls == mxs.Arch___Design__mi ):
outMap = None
# first check for mental ray properties
if ( is_prop( material, 'mental_ray__material_custom_attribute' ) ):
mrattr = material.mental_ray__material_custom_attribute
if ( mrattr.displacementOn and not mrattr.displacementLocked ):
outMap = mrattr.displacement
else:
outMap = None
# create a custom output material to match the output amount
if ( not outMap and material.displacementMap and material.displacement_map_on ):
if ( material.displacement_map_amt ):
outMap = mxs.Output()
outMap.map1 = material.displacementMap
outMap.map1Enabled = True
outMap.output.Output_Amount = material.displacement_map_amt
else:
outMap = material.displacementMap
return outMap
# return a blend's displacement
elif ( cls == mxs.Blend ):
if ( is_prop( material, 'mental_ray__material_custom_attribute' ) ):
mrattr = material.mental_ray__material_custom_attribute
if ( mrattr.displacementOn and not mrattr.displacementLocked ):
return mrattr.displacement
return None
# return skin shader displacements
elif ( cls in (mxs.SSS_Fast_Skin_Material_Displace__mi,mxs.SSS_Fast_Skin___w__Disp___mi) ):
return material.displace
# return a mental ray displacement
elif ( cls == mxs.mental_ray ):
if material.displaceOn:
return material.displacement
return None
def findOpacityMap( material ):
"""
\remarks looks for the opacity map of the input material based on its type
\param material <Py3dsMax.mxs.Material>
\return <Py3dsMax.mxs.Map> opacityMap || None
"""
cls = mxs.classof( material )
# return a standard material's opacity map
if ( cls == mxs.StandardMaterial ):
if material.opacityMapEnable:
return material.opacityMap
# return a vray material's opacity map
elif ( cls in (mxs.VrayMtl, mxs.VRaySkinMtl)):
if material.texmap_opacity_on:
return material.texmap_opacity
# return a vray light material's opacity map
elif ( cls == mxs.VrayLightMtl ):
if material.opacity_texmap_on:
return material.opacity_texmap
# return a matte's opacity map
elif ( cls in (mxs.Matte_Shadow_Reflection__mi,mxs.mr_Matte_Shadow_Reflection_Mtl) ):
if material.opacity_connected:
return material.opacity_shader
# return an arch design's opacity map
elif ( cls == mxs.Arch___Design__mi ):
if material.cutoutmap_on:
return material.cutout_map
return None
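#----------------------------------------
# Illustrative sketch only, assuming a running 3ds Max session with Py3dsMax available.
# _exampleOverride is a hypothetical helper (not part of this package) showing how the
# functions above combine: keep the base material's opacity/displacement/bump while
# swapping in a plain StandardMaterial as the override shader.
def _exampleOverride( baseMaterial ):
	from cross3d.constants import MaterialOverrideOptions
	override = mxs.StandardMaterial()
	return createMaterialOverride( baseMaterial, override, options = MaterialOverrideOptions.All )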
|
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
copy_rel_path=None,
timeout_seconds=30 * 60):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
# the entire repo will be cloned if copy_rel_path is not set.
if copy_rel_path:
environ['RELATIVE_COPY_PATH'] = copy_rel_path
docker_args = []
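# Pass each environment variable to the docker run script as a '-e KEY=VALUE' argument.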
for k, v in list(environ.items()):
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'DOCKER_RUN_SCRIPT_COMMAND': shell_command,
}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name,
cmdline,
environ=None,
shell=False,
flake_retries=0,
timeout_retries=0,
use_workspace=False,
timeout_seconds=10 * 60):
"""Creates jobspec."""
environ = {} if environ is None else environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self,
platform,
arch,
docker_suffix=None,
use_dotnet_cli=False,
presubmit=False):
self.name = 'csharp_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
if presubmit:
self.labels.append('presubmit')
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
del inner_jobs  # arg unused as there is little opportunity for parallelizing what's inside the distribtests
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(self.name, [
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix
],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
# Use double leading / as the first occurrence gets removed by msys bash
# when invoking the .bat file (side-effect of posix path conversion)
environ = {
'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
}
else:
environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name, [
'test\\distrib\\csharp\\run_distrib_test%s.bat' %
self.script_suffix
],
environ=environ,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self,
platform,
arch,
docker_suffix,
source=False,
presubmit=False):
self.source = source
if source:
self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix)
else:
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if not self.platform == 'linux':
raise Exception("Not supported yet.")
if self.source:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_dev_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_source_distrib_test.sh',
copy_rel_path='test/distrib')
else:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_binary_distrib_test.sh',
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self,
platform,
arch,
docker_suffix,
ruby_version=None,
source=False,
presubmit=False):
self.package_type = 'binary'
if source:
self.package_type = 'source'
self.name = 'ruby_%s_%s_%s_version_%s_package_type_%s' % (
platform, arch, docker_suffix, ruby_version or
'unspecified', self.package_type)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.ruby_version = ruby_version
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
arch_to_gem_arch = {
'x64': 'x86_64',
'x86': 'x86',
}
if not self.platform == 'linux':
raise Exception("Not supported yet.")
dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix, self.arch)
if self.ruby_version is not None:
dockerfile_name += '_%s' % self.ruby_version
return create_docker_jobspec(
self.name,
dockerfile_name,
'test/distrib/ruby/run_distrib_test.sh %s %s %s' %
(arch_to_gem_arch[self.arch], self.platform, self.package_type),
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class PHP7DistribTest(object):
"""Tests PHP7 package"""
def __init__(self, platform, arch, docker_suffix=None, presubmit=False):
self.name = 'php7_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', 'php7', platform, arch]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/php7_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/php/run_distrib_test.sh',
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(
self.name, ['test/distrib/php/run_distrib_test_macos.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
timeout_seconds=15 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make install by building examples."""
def __init__(self,
platform,
arch,
docker_suffix=None,
testcase=None,
presubmit=False):
if platform == 'linux':
self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
testcase)
else:
self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.testcase = testcase
self.labels = [
'distribtest',
'cpp',
platform,
arch,
testcase,
]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
environ = {}
if inner_jobs is not None:
# set number of parallel jobs for the C++ build
environ['GRPC_CPP_DISTRIBTEST_BUILD_COMPILER_JOBS'] = str(
inner_jobs)
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase,
timeout_seconds=45 * 60)
elif self.platform == 'windows':
return create_jobspec(
self.name,
['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
environ={},
timeout_seconds=30 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [
# C++
CppDistribTest('linux', 'x64', 'stretch', 'cmake', presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_as_submodule',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_as_externalproject',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_fetchcontent',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch_aarch64_cross',
'cmake_aarch64_cross',
presubmit=True),
CppDistribTest('windows', 'x86', testcase='cmake', presubmit=True),
CppDistribTest('windows',
'x86',
testcase='cmake_as_externalproject',
presubmit=True),
# C#
CSharpDistribTest('linux', 'x64', 'stretch', presubmit=True),
CSharpDistribTest('linux',
'x64',
'stretch',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604', use_dotnet_cli=True),
CSharpDistribTest('linux',
'x64',
'alpine',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet31',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet5',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('macos', 'x64', presubmit=True),
CSharpDistribTest('windows', 'x86', presubmit=True),
CSharpDistribTest('windows', 'x64', presubmit=True),
# Python
PythonDistribTest('linux', 'x64', 'buster', presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora34'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'alpine'),
PythonDistribTest('linux', 'x64', 'ubuntu1804'),
PythonDistribTest('linux', 'aarch64', 'python38_buster',
presubmit=True),
PythonDistribTest('linux',
'x64',
'alpine3.7',
source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7', source=True),
PythonDistribTest('linux', 'x64', 'fedora34', source=True),
PythonDistribTest('linux', 'x64', 'arch', source=True),
PythonDistribTest('linux', 'x64', 'ubuntu1804', source=True),
# Ruby
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_5'),
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_6'),
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_7',
presubmit=True),
# TODO(apolcyn): add a ruby 3.0 test once protobuf adds support
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_5',
source=True,
presubmit=True),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'ubuntu1804', presubmit=True),
# PHP7
PHP7DistribTest('linux', 'x64', 'stretch', presubmit=True),
PHP7DistribTest('macos', 'x64', presubmit=True),
]
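# Illustrative helper (not used by the test runner): shows how the label lists above can
# be filtered, e.g. to select only the targets tagged for presubmit runs. The function
# name is hypothetical.
def _presubmit_targets():
    """Return the subset of targets labeled 'presubmit'."""
    return [target for target in targets() if 'presubmit' in target.labels]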
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import sys
from pants.base.deprecated import warn_or_error
from pants.option.arg_splitter import GLOBAL_SCOPE, ArgSplitter
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_util import is_list_option
from pants.option.option_value_container import OptionValueContainer
from pants.option.parser_hierarchy import ParserHierarchy, enclosing_scope
from pants.option.scope import ScopeInfo
class Options(object):
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [GLOBAL] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [GLOBAL] section of pants.ini.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.ini.
- The value of the foo_bar key in the [compile] section of pants.ini.
- The value of the foo_bar key in the [GLOBAL] section of pants.ini
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
"""
class OptionTrackerRequiredError(Exception):
"""Options requires an OptionTracker instance."""
@classmethod
def complete_scopes(cls, scope_infos):
"""Expand a set of scopes to include all enclosing scopes.
E.g., if the set contains `foo.bar.baz`, ensure that it also contains `foo.bar` and `foo`.
Also adds any deprecated scopes.
"""
ret = {GlobalOptionsRegistrar.get_scope_info()}
original_scopes = set()
for si in scope_infos:
ret.add(si)
original_scopes.add(si.scope)
if si.deprecated_scope:
ret.add(ScopeInfo(si.deprecated_scope, si.category, si.optionable_cls))
original_scopes.add(si.deprecated_scope)
# TODO: Once scope name validation is enforced (so there can be no dots in scope name
# components) we can replace this line with `for si in scope_infos:`, because it will
# not be possible for a deprecated_scope to introduce any new intermediate scopes.
for si in copy.copy(ret):
scope = si.scope
while scope != '':
if scope not in original_scopes:
ret.add(ScopeInfo(scope, ScopeInfo.INTERMEDIATE))
scope = enclosing_scope(scope)
return ret
@classmethod
def create(cls, env, config, known_scope_infos, args=None, bootstrap_option_values=None,
option_tracker=None):
"""Create an Options instance.
:param env: a dict of environment variables.
:param :class:`pants.option.config.Config` config: data from a config file.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
:param :class:`pants.option.option_tracker.OptionTracker` option_tracker: option tracker
instance to record how option values were assigned.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos)
args = sys.argv if args is None else args
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
if not option_tracker:
raise cls.OptionTrackerRequiredError()
if bootstrap_option_values:
target_spec_files = bootstrap_option_values.target_spec_files
if target_spec_files:
for spec in target_spec_files:
with open(spec) as f:
target_specs.extend(filter(None, [line.strip() for line in f]))
help_request = splitter.help_request
parser_hierarchy = ParserHierarchy(env, config, complete_known_scope_infos, option_tracker)
values_by_scope = {} # Arg values, parsed per-scope on demand.
bootstrap_option_values = bootstrap_option_values
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker)
def __init__(self, goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, values_by_scope, bootstrap_option_values, known_scope_to_info,
option_tracker):
"""The low-level constructor for an Options instance.
Dependees should use `Options.create` instead.
"""
self._goals = goals
self._scope_to_flags = scope_to_flags
self._target_specs = target_specs
self._passthru = passthru
self._passthru_owner = passthru_owner
self._help_request = help_request
self._parser_hierarchy = parser_hierarchy
self._values_by_scope = values_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
self._option_tracker = option_tracker
@property
def tracker(self):
return self._option_tracker
@property
def help_request(self):
"""
:API: public
"""
return self._help_request
@property
def target_specs(self):
"""The targets to operate on.
:API: public
"""
return self._target_specs
@property
def goals(self):
"""The requested goals, in the order specified on the cmd line.
:API: public
"""
return self._goals
@property
def known_scope_to_info(self):
return self._known_scope_to_info
@property
def scope_to_flags(self):
return self._scope_to_flags
def drop_flag_values(self):
"""Returns a copy of these options that ignores values specified via flags.
Any pre-cached option values are cleared and only option values that come from option defaults,
the config or the environment are used.
"""
# An empty scope_to_flags to force all values to come via the config -> env hierarchy alone
# and empty values in case we already cached some from flags.
no_flags = {}
no_values = {}
return Options(self._goals,
no_flags,
self._target_specs,
self._passthru,
self._passthru_owner,
self._help_request,
self._parser_hierarchy,
no_values,
self._bootstrap_option_values,
self._known_scope_to_info,
self._option_tracker)
def is_known_scope(self, scope):
"""Whether the given scope is known by this instance.
:API: public
"""
return scope in self._known_scope_to_info
def passthru_args_for_scope(self, scope):
# Passthru args "belong" to the last scope mentioned on the command-line.
# Note: If that last scope is a goal, we allow all tasks in that goal to access the passthru
# args. This is to allow the more intuitive
# pants run <target> -- <passthru args>
# instead of requiring
# pants run.py <target> -- <passthru args>.
#
# However note that in the case where multiple tasks run in the same goal, e.g.,
# pants test <target> -- <passthru args>
# Then, e.g., both junit and pytest will get the passthru args even though the user probably
# only intended them to go to one of them. If the wrong one is not a no-op then the error will
# be unpredictable. However this is not a common case, and can be circumvented with an
# explicit test.pytest or test.junit scope.
if (scope and self._passthru_owner and scope.startswith(self._passthru_owner) and
(len(scope) == len(self._passthru_owner) or scope[len(self._passthru_owner)] == '.')):
return self._passthru
else:
return []
def register(self, scope, *args, **kwargs):
"""Register an option in the given scope."""
self.get_parser(scope).register(*args, **kwargs)
deprecated_scope = self.known_scope_to_info[scope].deprecated_scope
if deprecated_scope:
self.get_parser(deprecated_scope).register(*args, **kwargs)
def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering options on the given scope."""
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
# docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
return register
def get_parser(self, scope):
"""Returns the parser for the given scope, so code can register on it directly."""
return self._parser_hierarchy.get_parser_by_scope(scope)
def walk_parsers(self, callback):
self._parser_hierarchy.walk(callback)
def for_scope(self, scope, inherit_from_enclosing_scope=True):
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
:API: public
"""
# Short-circuit, if already computed.
if scope in self._values_by_scope:
return self._values_by_scope[scope]
# First get enclosing scope's option values, if any.
if scope == GLOBAL_SCOPE or not inherit_from_enclosing_scope:
values = OptionValueContainer()
else:
values = copy.copy(self.for_scope(enclosing_scope(scope)))
# Now add our values.
flags_in_scope = self._scope_to_flags.get(scope, [])
self._parser_hierarchy.get_parser_by_scope(scope).parse_args(flags_in_scope, values)
# If we're the new name of a deprecated scope, also get values from that scope.
deprecated_scope = self.known_scope_to_info[scope].deprecated_scope
# Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's
# Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must
# check that scope != deprecated_scope to prevent infinite recursion.
if deprecated_scope is not None and scope != deprecated_scope:
# Do the deprecation check only on keys that were explicitly set on the deprecated scope
# (and not on its enclosing scopes).
explicit_keys = self.for_scope(deprecated_scope,
inherit_from_enclosing_scope=False).get_explicit_keys()
if explicit_keys:
warn_or_error(self.known_scope_to_info[scope].deprecated_scope_removal_version,
'scope {}'.format(deprecated_scope),
'Use scope {} instead (options: {})'.format(scope, ', '.join(explicit_keys)))
# Update our values with those of the deprecated scope (now including values inherited
# from its enclosing scope).
# Note that a deprecated val will take precedence over a val of equal rank.
# This makes the code a bit neater.
values.update(self.for_scope(deprecated_scope))
# Record the value derivation.
for option in values:
self._option_tracker.record_option(scope=scope, option=option, value=values[option],
rank=values.get_rank(option))
# Cache the values.
self._values_by_scope[scope] = values
return values
def get_fingerprintable_for_scope(self, scope, include_passthru=False):
"""Returns a list of fingerprintable (option type, option value) pairs for the given scope.
Fingerprintable options are options registered via a "fingerprint=True" kwarg.
:param str scope: The scope to gather fingerprintable options for.
:param bool include_passthru: Whether to include passthru args captured by `scope` in the
fingerprintable options.
:API: public
"""
pairs = []
if include_passthru:
# Passthru args can only be sent to outermost scopes so we gather them once here up-front.
passthru_args = self.passthru_args_for_scope(scope)
# NB: We can't sort passthru args, the underlying consumer may be order-sensitive.
pairs.extend((str, passthru_arg) for passthru_arg in passthru_args)
# Note that we iterate over options registered at `scope` and at all enclosing scopes, since
# option-using code can read those values indirectly via its own OptionValueContainer, so
# they can affect that code's output.
registration_scope = scope
while registration_scope is not None:
parser = self._parser_hierarchy.get_parser_by_scope(registration_scope)
# Sort the arguments, so that the fingerprint is consistent.
for (_, kwargs) in sorted(parser.option_registrations_iter()):
if kwargs.get('recursive') and not kwargs.get('recursive_root'):
continue # We only need to fprint recursive options once.
if kwargs.get('fingerprint') is not True:
continue
# Note that we read the value from scope, even if the registration was on an enclosing
# scope, to get the right value for recursive options (and because this mirrors what
# option-using code does).
val = self.for_scope(scope)[kwargs['dest']]
# If we have a list then we delegate to the fingerprinting implementation of the members.
if is_list_option(kwargs):
val_type = kwargs.get('member_type', str)
else:
val_type = kwargs.get('type', str)
pairs.append((val_type, val))
registration_scope = (None if registration_scope == ''
else enclosing_scope(registration_scope))
return pairs
def __getitem__(self, scope):
# TODO(John Sirois): Mainly supports use of dict<str, dict<str, str>> for mock options in tests,
# Consider killing if tests consolidate on using TestOptions instead of the raw dicts.
return self.for_scope(scope)
def bootstrap_option_values(self):
"""Return the option values for bootstrap options.
General code can also access these values in the global scope. But option registration code
cannot, hence this special-casing of this small set of options.
"""
return self._bootstrap_option_values
def for_global_scope(self):
"""Return the option values for the global scope.
:API: public
"""
return self.for_scope(GLOBAL_SCOPE)
|
|
import os
import time
import inspect
import json
import traceback
from collections import defaultdict
import numpy as np
from . import test_functions as funcs
from . import go_benchmark_functions as gbf
from .common import Benchmark, is_xslow, safe_import
from .lsq_problems import extract_lsq_problems
with safe_import():
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import (leastsq, basinhopping, differential_evolution,
dual_annealing)
from scipy.optimize._minimize import MINIMIZE_METHODS
class _BenchOptimizers(Benchmark):
"""a framework for benchmarking the optimizer
Parameters
----------
function_name : string
fun : callable
der : callable
function that returns the derivative (jacobian, gradient) of fun
hess : callable
function that returns the hessian of fun
minimizer_kwargs : kwargs
additional keywords passed to the minimizer. e.g. tol, maxiter
"""
def __init__(self, function_name, fun, der=None, hess=None,
**minimizer_kwargs):
self.function_name = function_name
self.fun = fun
self.der = der
self.hess = hess
self.minimizer_kwargs = minimizer_kwargs
if "tol" not in minimizer_kwargs:
minimizer_kwargs["tol"] = 1e-4
self.results = []
@classmethod
def from_funcobj(cls, function_name, function, **minimizer_kwargs):
self = cls.__new__(cls)
self.function_name = function_name
self.function = function
self.fun = function.fun
if hasattr(function, 'der'):
self.der = function.der
self.bounds = function.bounds
self.minimizer_kwargs = minimizer_kwargs
self.results = []
return self
def reset(self):
self.results = []
def energy_gradient(self, x):
return self.fun(x), self.function.der(x)
def add_result(self, result, t, name):
"""add a result to the list"""
result.time = t
result.name = name
if not hasattr(result, "njev"):
result.njev = 0
if not hasattr(result, "nhev"):
result.nhev = 0
self.results.append(result)
def print_results(self):
"""print the current list of results"""
results = self.average_results()
results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
if not results:
return
print("")
print("=========================================================")
print("Optimizer benchmark: %s" % (self.function_name))
print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
print("averaged over %d starting configurations" % (results[0].ntrials))
print(" Optimizer nfail nfev njev nhev time")
print("---------------------------------------------------------")
for res in results:
print("%11s | %4d | %4d | %4d | %4d | %.6g" %
(res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
def average_results(self):
"""group the results by minimizer and average over the runs"""
grouped_results = defaultdict(list)
for res in self.results:
grouped_results[res.name].append(res)
averaged_results = dict()
for name, result_list in grouped_results.items():
newres = scipy.optimize.OptimizeResult()
newres.name = name
newres.mean_nfev = np.mean([r.nfev for r in result_list])
newres.mean_njev = np.mean([r.njev for r in result_list])
newres.mean_nhev = np.mean([r.nhev for r in result_list])
newres.mean_time = np.mean([r.time for r in result_list])
newres.ntrials = len(result_list)
newres.nfail = len([r for r in result_list if not r.success])
newres.nsuccess = len([r for r in result_list if r.success])
try:
newres.ndim = len(result_list[0].x)
except TypeError:
newres.ndim = 1
averaged_results[name] = newres
return averaged_results
# for basinhopping
def accept_test(self, x_new=None, *args, **kwargs):
"""
Does the new candidate vector lie in between the bounds?
Returns
-------
accept_test : bool
The candidate vector lies in between the bounds
"""
if not hasattr(self.function, "xmin"):
return True
if np.any(x_new < self.function.xmin):
return False
if np.any(x_new > self.function.xmax):
return False
return True
def run_basinhopping(self):
"""
Do an optimization run for basinhopping
"""
kwargs = self.minimizer_kwargs
if hasattr(self.fun, "temperature"):
kwargs["T"] = self.function.temperature
if hasattr(self.fun, "stepsize"):
kwargs["stepsize"] = self.function.stepsize
minimizer_kwargs = {"method": "L-BFGS-B"}
x0 = self.function.initial_vector()
# basinhopping - no gradient
minimizer_kwargs['jac'] = False
self.function.nfev = 0
t0 = time.time()
res = basinhopping(
self.fun, x0, accept_test=self.accept_test,
minimizer_kwargs=minimizer_kwargs,
**kwargs)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'basinh.')
def run_differentialevolution(self):
"""
Do an optimization run for differential_evolution
"""
self.function.nfev = 0
t0 = time.time()
res = differential_evolution(self.fun,
self.bounds,
popsize=20)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DE')
def run_dualannealing(self):
"""
Do an optimization run for dual_annealing
"""
self.function.nfev = 0
t0 = time.time()
res = dual_annealing(self.fun,
self.bounds)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DA')
def bench_run_global(self, numtrials=50, methods=None):
"""
Run the optimization tests for the required minimizers.
"""
if methods is None:
methods = ['DE', 'basinh.', 'DA']
method_fun = {'DE': self.run_differentialevolution,
'basinh.': self.run_basinhopping,
'DA': self.run_dualannealing,}
for i in range(numtrials):
for m in methods:
method_fun[m]()
def bench_run(self, x0, methods=None, **minimizer_kwargs):
"""do an optimization test starting at x0 for all the optimizers"""
kwargs = self.minimizer_kwargs
if methods is None:
methods = MINIMIZE_METHODS
# L-BFGS-B, BFGS, trust-constr can use gradients, but examine
# performance when numerical differentiation is used.
fonly_methods = ["COBYLA", 'Powell', 'nelder-mead', 'L-BFGS-B', 'BFGS',
'trust-constr']
for method in fonly_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
'trust-constr']
if self.der is not None:
for method in gradient_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, **kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov', 'trust-constr']
if self.hess is not None:
for method in hessian_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, hess=self.hess,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
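# A minimal usage sketch (not part of the asv benchmark classes below): drive
# _BenchOptimizers by hand on the Rosenbrock function and print the aggregated
# table.  The function/derivative names are the ones already imported by this
# module; the method list and number of starting points are illustrative only.
def _demo_bench_run():
    b = _BenchOptimizers("Rosenbrock function",
                         fun=rosen, der=rosen_der, hess=rosen_hess)
    for _ in range(5):
        b.bench_run(np.random.uniform(-3, 3, 3),
                    methods=['L-BFGS-B', 'BFGS', 'Newton-CG'])
    b.print_results()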
class BenchSmoothUnbounded(Benchmark):
"""Benchmark the optimizers with smooth, unbounded, functions"""
params = [
['rosenbrock_slow', 'rosenbrock_nograd', 'rosenbrock', 'rosenbrock_tight',
'simple_quadratic', 'asymmetric_quadratic',
'sin_1d', 'booth', 'beale', 'LJ'],
["COBYLA", 'Powell', 'nelder-mead',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'trust-constr'],
["mean_nfev", "mean_time"]
]
param_names = ["test function", "solver", "result type"]
def setup(self, func_name, method_name, ret_val):
b = getattr(self, 'run_' + func_name)(methods=[method_name])
r = b.average_results().get(method_name)
if r is None:
raise NotImplementedError()
self.result = getattr(r, ret_val)
def track_all(self, func_name, method_name, ret_val):
return self.result
# SlowRosen has a 50us delay on each function evaluation. By comparing to
# rosenbrock_nograd it should be possible to figure out how much time a
# minimizer uses internally, compared to the time required for function
# evaluation.
def run_rosenbrock_slow(self, methods=None):
s = funcs.SlowRosen()
b = _BenchOptimizers("Rosenbrock function",
fun=s.fun)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
# see what the performance of the solvers are if numerical differentiation
# has to be used.
def run_rosenbrock_nograd(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock_tight(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess,
tol=1e-8)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_simple_quadratic(self, methods=None):
s = funcs.SimpleQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("simple quadratic function",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_asymmetric_quadratic(self, methods=None):
s = funcs.AsymmetricQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("function sum(x**2) + x[0]",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_sin_1d(self, methods=None):
fun = lambda x: np.sin(x[0])
der = lambda x: np.array([np.cos(x[0])])
b = _BenchOptimizers("1d sin function",
fun=fun, der=der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 1), methods=methods)
return b
def run_booth(self, methods=None):
s = funcs.Booth()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Booth's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_beale(self, methods=None):
s = funcs.Beale()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Beale's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_LJ(self, methods=None):
s = funcs.LJ()
# print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient,
# np.random.uniform(-2,2,3*4))
natoms = 4
b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, natoms*3), methods=methods)
return b
class BenchLeastSquares(Benchmark):
"""Class for benchmarking nonlinear least squares solvers."""
problems = extract_lsq_problems()
params = [
list(problems.keys()),
["average time", "nfev", "success"]
]
param_names = [
"problem", "result type"
]
def track_all(self, problem_name, result_type):
problem = self.problems[problem_name]
if problem.lb is not None or problem.ub is not None:
raise NotImplementedError
ftol = 1e-5
if result_type == 'average time':
n_runs = 10
t0 = time.time()
for _ in range(n_runs):
leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
full_output=True)
return (time.time() - t0) / n_runs
x, cov_x, info, message, ier = leastsq(
problem.fun, problem.x0, Dfun=problem.jac,
ftol=ftol, full_output=True
)
if result_type == 'nfev':
return info['nfev']
elif result_type == 'success':
return int(problem.check_answer(x, ftol))
else:
raise NotImplementedError
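# A hedged sketch of the ``leastsq`` call pattern used in track_all above, on a
# toy exponential-decay fit.  The model, data and starting point are
# illustrative and are not part of extract_lsq_problems().
def _demo_leastsq():
    t = np.linspace(0, 1, 25)
    y = 2.0 * np.exp(-1.3 * t)

    def residuals(p):
        return p[0] * np.exp(-p[1] * t) - y

    x, cov_x, info, message, ier = leastsq(residuals, [1.0, 1.0],
                                           full_output=True)
    # ier values 1-4 indicate a solution was found; info['nfev'] is the number
    # of residual evaluations, as used for the 'nfev' result type above.
    return x, info['nfev'], ier in (1, 2, 3, 4)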
# `export SCIPY_XSLOW=1` to enable BenchGlobal.track_all
# `export SCIPY_GLOBAL_BENCH=AMGM,Adjiman,...` to run specific tests
# `export SCIPY_GLOBAL_BENCH_NUMTRIALS=10` to specify n_iterations, default 100
#
# Note that it can take several hours to run; intermediate output
# can be found under benchmarks/global-bench-results.json
class BenchGlobal(Benchmark):
"""
Benchmark the global optimizers using the go_benchmark_functions
suite
"""
timeout = 300
_functions = dict([
item for item in inspect.getmembers(gbf, inspect.isclass)
if (issubclass(item[1], gbf.Benchmark) and
            item[0] not in ('Benchmark',) and
not item[0].startswith('Problem'))
])
if not is_xslow():
_enabled_functions = []
elif 'SCIPY_GLOBAL_BENCH' in os.environ:
_enabled_functions = [x.strip() for x in
os.environ['SCIPY_GLOBAL_BENCH'].split(',')]
else:
_enabled_functions = list(_functions.keys())
params = [
list(_functions.keys()),
["success%", "<nfev>"],
['DE', 'basinh.', 'DA'],
]
param_names = ["test function", "result type", "solver"]
def __init__(self):
self.enabled = is_xslow()
try:
self.numtrials = int(os.environ['SCIPY_GLOBAL_BENCH_NUMTRIALS'])
except (KeyError, ValueError):
self.numtrials = 100
self.dump_fn = os.path.join(os.path.dirname(__file__), '..', 'global-bench-results.json')
self.results = {}
def setup(self, name, ret_value, solver):
if name not in self._enabled_functions:
raise NotImplementedError("skipped")
# load json backing file
with open(self.dump_fn, 'r') as f:
self.results = json.load(f)
def teardown(self, name, ret_value, solver):
if not self.enabled:
return
with open(self.dump_fn, 'w') as f:
json.dump(self.results, f, indent=2, sort_keys=True)
def track_all(self, name, ret_value, solver):
if name in self.results and solver in self.results[name]:
# have we done the function, and done the solver?
# if so, then just return the ret_value
av_results = self.results[name]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
klass = self._functions[name]
f = klass()
try:
b = _BenchOptimizers.from_funcobj(name, f)
with np.errstate(all='ignore'):
b.bench_run_global(methods=[solver],
numtrials=self.numtrials)
av_results = b.average_results()
if name not in self.results:
self.results[name] = {}
self.results[name][solver] = av_results[solver]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
except Exception:
print("".join(traceback.format_exc()))
self.results[name] = "".join(traceback.format_exc())
def setup_cache(self):
if not self.enabled:
return
# create the logfile to start with
with open(self.dump_fn, 'w') as f:
json.dump({}, f, indent=2)
|
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import inspect
import imp
import string
import logging
from functools import partial
from collections import OrderedDict
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Extension
from wlauto.exceptions import NotFoundError, LoaderError
from wlauto.utils.misc import walk_modules, load_class, merge_lists, merge_dicts, get_article
from wlauto.utils.types import identifier
MODNAME_TRANS = string.maketrans(':/\\.', '____')
class ExtensionLoaderItem(object):
def __init__(self, ext_tuple):
self.name = ext_tuple.name
self.default_package = ext_tuple.default_package
self.default_path = ext_tuple.default_path
self.cls = load_class(ext_tuple.cls)
class GlobalParameterAlias(object):
"""
    Represents a "global alias" for an extension parameter. A global alias
    is specified at the top level of the config rather than being namespaced
    under an extension name.
Multiple extensions may have parameters with the same global_alias if they are
part of the same inheritance hierarchy and one parameter is an override of the
other. This class keeps track of all such cases in its extensions dict.
"""
def __init__(self, name):
self.name = name
self.extensions = {}
def iteritems(self):
for ext in self.extensions.itervalues():
yield (self.get_param(ext), ext)
def get_param(self, ext):
for param in ext.parameters:
if param.global_alias == self.name:
return param
message = 'Extension {} does not have a parameter with global alias {}'
raise ValueError(message.format(ext.name, self.name))
def update(self, other_ext):
self._validate_ext(other_ext)
self.extensions[other_ext.name] = other_ext
def _validate_ext(self, other_ext):
other_param = self.get_param(other_ext)
for param, ext in self.iteritems():
if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
other_param.kind != param.kind):
message = 'Duplicate global alias {} declared in {} and {} extensions with different types'
raise LoaderError(message.format(self.name, ext.name, other_ext.name))
if param.kind != other_param.kind:
message = 'Two params {} in {} and {} in {} both declare global alias {}, and are of different kinds'
raise LoaderError(message.format(param.name, ext.name,
other_param.name, other_ext.name, self.name))
def __str__(self):
text = 'GlobalAlias({} => {})'
extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
return text.format(self.name, extlist)
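# Hypothetical sketch of how a GlobalParameterAlias ties the same global alias
# to parameters of related extensions.  The fake parameter/extension classes
# below stand in for real WA Parameter/Extension objects and exist only for
# illustration.
def _global_alias_example():
    class _FakeParam(object):
        def __init__(self, name, global_alias, kind=str):
            self.name = name
            self.global_alias = global_alias
            self.kind = kind

    class _BaseDevice(object):
        name = 'base_device'
        parameters = [_FakeParam('serial', 'device_serial')]

    class _FastbootDevice(_BaseDevice):
        name = 'fastboot_device'
        parameters = [_FakeParam('serial', 'device_serial')]

    ga = GlobalParameterAlias('device_serial')
    ga.update(_BaseDevice)       # first extension declaring the alias
    ga.update(_FastbootDevice)   # allowed: subclass overriding the same param
    for param, ext in ga.iteritems():
        print('{}.{} <- {}'.format(ext.name, param.name, ga.name))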
class ExtensionLoader(object):
"""
Discovers, enumerates and loads available devices, configs, etc.
    The loader will attempt to discover things on construction by looking
    in a predetermined set of locations defined by default_paths. Optionally,
    additional locations may be specified through the ``paths`` parameter,
    which must be a list of additional Python module paths (i.e. dot-delimited).
"""
_instance = None
# Singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(ExtensionLoader, cls).__new__(cls, *args, **kwargs)
else:
for k, v in kwargs.iteritems():
if not hasattr(cls._instance, k):
raise ValueError('Invalid parameter for ExtensionLoader: {}'.format(k))
setattr(cls._instance, k, v)
return cls._instance
def set_load_defaults(self, value):
self._load_defaults = value
if value:
self.packages = merge_lists(self.default_packages, self.packages, duplicates='last')
def get_load_defaults(self):
return self._load_defaults
load_defaults = property(get_load_defaults, set_load_defaults)
def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False, load_defaults=True):
"""
params::
:packages: List of packages to load extensions from.
:paths: List of paths to be searched for Python modules containing
WA extensions.
            :ignore_paths: List of paths to ignore when searching for WA extensions (these
                           would typically be subdirectories of one or more locations listed
                           in the ``paths`` parameter).
:keep_going: Specifies whether to keep going if an error occurs while loading
extensions.
:load_defaults: Specifies whether extension should be loaded from default locations
(WA package, and user's WA directory) as well as the packages/paths
specified explicitly in ``packages`` and ``paths`` parameters.
"""
self._load_defaults = None
self.logger = logging.getLogger('ExtensionLoader')
self.keep_going = keep_going
self.extension_kinds = {ext_tuple.name: ExtensionLoaderItem(ext_tuple)
for ext_tuple in settings.extensions}
self.default_packages = [ext.default_package for ext in self.extension_kinds.values()]
self.packages = packages or []
self.load_defaults = load_defaults
self.paths = paths or []
self.ignore_paths = ignore_paths or []
self.extensions = {}
self.aliases = {}
self.global_param_aliases = {}
# create an empty dict for each extension type to store discovered
# extensions.
for ext in self.extension_kinds.values():
setattr(self, '_' + ext.name, {})
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def update(self, packages=None, paths=None, ignore_paths=None):
""" Load extensions from the specified paths/packages
        without clearing or reloading existing extensions. """
if packages:
self.packages.extend(packages)
self._load_from_packages(packages)
if paths:
self.paths.extend(paths)
self.ignore_paths.extend(ignore_paths or [])
self._load_from_paths(paths, ignore_paths or [])
def clear(self):
""" Clear all discovered items. """
self.extensions.clear()
for ext in self.extension_kinds.values():
self._get_store(ext).clear()
def reload(self):
""" Clear all discovered items and re-run the discovery. """
self.clear()
self._load_from_packages(self.packages)
self._load_from_paths(self.paths, self.ignore_paths)
def get_extension_class(self, name, kind=None):
"""
Return the class for the specified extension if found or raises ``ValueError``.
"""
name, _ = self.resolve_alias(name)
if kind is None:
return self.extensions[name]
ext = self.extension_kinds.get(kind)
if ext is None:
raise ValueError('Unknown extension type: {}'.format(kind))
store = self._get_store(ext)
if name not in store:
            raise NotFoundError('Extension {} is not {} {}.'.format(name, get_article(kind), kind))
return store[name]
def get_extension(self, name, *args, **kwargs):
"""
Return extension of the specified kind with the specified name. Any additional
parameters will be passed to the extension's __init__.
"""
name, base_kwargs = self.resolve_alias(name)
kind = kwargs.pop('kind', None)
kwargs = merge_dicts(base_kwargs, kwargs, list_duplicates='last', dict_type=OrderedDict)
cls = self.get_extension_class(name, kind)
extension = _instantiate(cls, args, kwargs)
extension.load_modules(self)
return extension
def get_default_config(self, ext_name):
"""
Returns the default configuration for the specified extension name. The name may be an alias,
in which case, the returned config will be augmented with appropriate alias overrides.
"""
real_name, alias_config = self.resolve_alias(ext_name)
base_default_config = self.get_extension_class(real_name).get_default_config()
return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
def list_extensions(self, kind=None):
"""
List discovered extension classes. Optionally, only list extensions of a
particular type.
"""
if kind is None:
return self.extensions.values()
if kind not in self.extension_kinds:
raise ValueError('Unknown extension type: {}'.format(kind))
return self._get_store(self.extension_kinds[kind]).values()
def has_extension(self, name, kind=None):
"""
        Returns ``True`` if an extension with the specified ``name`` has been
discovered by the loader. If ``kind`` was specified, only returns ``True``
if the extension has been found, *and* it is of the specified kind.
"""
try:
self.get_extension_class(name, kind)
return True
except NotFoundError:
return False
def resolve_alias(self, alias_name):
"""
Try to resolve the specified name as an extension alias. Returns a
        two-tuple, the first value of which is the actual extension name, and the
second is a dict of parameter values for this alias. If the name passed
is already an extension name, then the result is ``(alias_name, {})``.
"""
alias_name = identifier(alias_name.lower())
if alias_name in self.extensions:
return (alias_name, {})
if alias_name in self.aliases:
alias = self.aliases[alias_name]
return (alias.extension_name, alias.params)
raise NotFoundError('Could not find extension or alias "{}"'.format(alias_name))
# Internal methods.
def __getattr__(self, name):
"""
        This resolves methods for specific extension types based on the corresponding
generic extension methods. So it's possible to say things like ::
loader.get_device('foo')
instead of ::
loader.get_extension('foo', kind='device')
"""
if name.startswith('get_'):
name = name.replace('get_', '', 1)
if name in self.extension_kinds:
return partial(self.get_extension, kind=name)
if name.startswith('list_'):
name = name.replace('list_', '', 1).rstrip('s')
if name in self.extension_kinds:
return partial(self.list_extensions, kind=name)
if name.startswith('has_'):
name = name.replace('has_', '', 1)
if name in self.extension_kinds:
return partial(self.has_extension, kind=name)
raise AttributeError(name)
def _get_store(self, ext):
name = getattr(ext, 'name', ext)
return getattr(self, '_' + name)
def _load_from_packages(self, packages):
try:
for package in packages:
for module in walk_modules(package):
self._load_module(module)
except ImportError as e:
message = 'Problem loading extensions from package {}: {}'
raise LoaderError(message.format(package, e.message))
def _load_from_paths(self, paths, ignore_paths):
self.logger.debug('Loading from paths.')
for path in paths:
self.logger.debug('Checking path %s', path)
for root, _, files in os.walk(path, followlinks=True):
should_skip = False
for igpath in ignore_paths:
if root.startswith(igpath):
should_skip = True
break
if should_skip:
continue
for fname in files:
if os.path.splitext(fname)[1].lower() != '.py':
continue
filepath = os.path.join(root, fname)
try:
modname = os.path.splitext(filepath[1:])[0].translate(MODNAME_TRANS)
module = imp.load_source(modname, filepath)
self._load_module(module)
except (SystemExit, ImportError), e:
if self.keep_going:
self.logger.warn('Failed to load {}'.format(filepath))
self.logger.warn('Got: {}'.format(e))
else:
raise LoaderError('Failed to load {}'.format(filepath), sys.exc_info())
except Exception as e:
message = 'Problem loading extensions from {}: {}'
raise LoaderError(message.format(filepath, e))
def _load_module(self, module): # NOQA pylint: disable=too-many-branches
self.logger.debug('Checking module %s', module.__name__)
for obj in vars(module).itervalues():
if inspect.isclass(obj):
if not issubclass(obj, Extension) or not hasattr(obj, 'name') or not obj.name:
continue
try:
for ext in self.extension_kinds.values():
if issubclass(obj, ext.cls):
self._add_found_extension(obj, ext)
break
else: # did not find a matching Extension type
                        message = 'Unknown extension type for {} (type: {})'
                        raise LoaderError(message.format(obj.name, obj.__name__))
except LoaderError as e:
if self.keep_going:
self.logger.warning(e)
else:
raise e
def _add_found_extension(self, obj, ext):
"""
:obj: Found extension class
:ext: matching extension item.
"""
self.logger.debug('\tAdding %s %s', ext.name, obj.name)
key = identifier(obj.name.lower())
obj.kind = ext.name
if key in self.extensions or key in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
        # Extensions are tracked both in a common extensions dict and in a
        # per-extension-kind dict (as retrieving extensions by kind is a
        # common use case).
self.extensions[key] = obj
store = self._get_store(ext)
store[key] = obj
for alias in obj.aliases:
alias_id = identifier(alias.name)
if alias_id in self.extensions or alias_id in self.aliases:
raise LoaderError('{} {} already exists.'.format(ext.name, obj.name))
self.aliases[alias_id] = alias
# Update global aliases list. If a global alias is already in the list,
# then make sure this extension is in the same parent/child hierarchy
# as the one already found.
for param in obj.parameters:
if param.global_alias:
if param.global_alias not in self.global_param_aliases:
ga = GlobalParameterAlias(param.global_alias)
ga.update(obj)
self.global_param_aliases[ga.name] = ga
else: # global alias already exists.
self.global_param_aliases[param.global_alias].update(obj)
# Utility functions.
def _instantiate(cls, args=None, kwargs=None):
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
try:
return cls(*args, **kwargs)
except Exception:
raise LoaderError('Could not load {}'.format(cls), sys.exc_info())
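# Hypothetical usage sketch.  The package name and extension kinds below are
# illustrative -- the real kinds come from ``settings.extensions`` -- and the
# function is never called at import time.
def _loader_example():
    loader = ExtensionLoader(packages=['my_wa_extensions'], load_defaults=True)
    # __getattr__ exposes per-kind helpers, so assuming a 'device' kind is
    # defined these two calls are equivalent:
    #   loader.get_extension('juno', kind='device')
    #   loader.get_device('juno')
    return [ext.name for ext in loader.list_extensions()]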
|
|
# Source: https://github.com/wolever/nose-parameterized
# commit 0da3a2f0325c17858a5f5f1fdf1939520ce85e48, Aug 25, 2013
# Stefan: I removed the dependency on the six module, don't need the portability, now.
#
# tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
#
# Unless stated otherwise in the source files, all code is copyright 2010 David
# Wolever <david@wolever.net>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of David Wolever.
import re
import inspect
from functools import wraps
from collections import namedtuple
from nose.tools import nottest
from unittest import TestCase
import new
new_instancemethod = new.instancemethod
_param = namedtuple("param", "args kwargs")
class param(_param):
""" Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
"""
    def __new__(cls, *args, **kwargs):
return _param.__new__(cls, args, kwargs)
@classmethod
def explicit(cls, args=None, kwargs=None):
""" Creates a ``param`` by explicitly specifying ``args`` and
``kwargs``::
>>> param.explicit([1,2,3])
param(*(1, 2, 3))
>>> param.explicit(kwargs={"foo": 42})
                param(*(), **{"foo": 42})
"""
args = args or ()
kwargs = kwargs or {}
return cls(*args, **kwargs)
@classmethod
def from_decorator(cls, args):
""" Returns an instance of ``param()`` for ``@parameterized`` argument
``args``::
>>> param.from_decorator((42, ))
param(args=(42, ), kwargs={})
>>> param.from_decorator("foo")
param(args=("foo", ), kwargs={})
"""
if isinstance(args, param):
return args
if isinstance(args, basestring):
args = (args, )
return cls(*args)
def __repr__(self):
return "param(*%r, **%r)" %self
class parameterized(object):
""" Parameterize a test case::
class TestInt(object):
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
            (2, 3, 5),
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
"""
def __init__(self, input):
self.get_input = self.input_as_callable(input)
def __call__(self, test_func):
self.assert_not_in_testcase_subclass()
@wraps(test_func)
def parameterized_helper_method(test_self=None):
f = test_func
if test_self is not None:
# If we are a test method (which we suppose to be true if we
# are being passed a "self" argument), we first need to create
# an instance method, attach it to the instance of the test
# class, then pull it back off to turn it into a bound method.
# If we don't do this, Nose gets cranky.
f = self.make_bound_method(test_self, test_func)
# Note: because nose is so very picky, the more obvious
# ``return self.yield_nose_tuples(f)`` won't work here.
for nose_tuple in self.yield_nose_tuples(f):
yield nose_tuple
test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
parameterized_helper_method.parameterized_input = input
parameterized_helper_method.parameterized_func = test_func
return parameterized_helper_method
def yield_nose_tuples(self, func):
for args in self.get_input():
p = param.from_decorator(args)
# ... then yield that as a tuple. If those steps aren't
            # followed precisely, Nose gets upset and doesn't run the test
# or doesn't run setup methods.
yield self.param_as_nose_tuple(p, func)
def param_as_nose_tuple(self, p, func):
nose_func = func
nose_args = p.args
if p.kwargs:
nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
nose_args = (p.args, p.kwargs)
return (nose_func, ) + nose_args
def make_bound_method(self, instance, func):
cls = type(instance)
im_f = new_instancemethod(func, None, cls)
setattr(cls, func.__name__, im_f)
return getattr(instance, func.__name__)
def assert_not_in_testcase_subclass(self):
parent_classes = self._terrible_magic_get_defining_classes()
if any(issubclass(cls, TestCase) for cls in parent_classes):
raise Exception("Warning: '@parameterized' tests won't work "
"inside subclasses of 'TestCase' - use "
"'@parameterized.expand' instead")
def _terrible_magic_get_defining_classes(self):
""" Returns the set of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
"""
stack = inspect.stack()
if len(stack) <= 4:
return []
frame = stack[4]
code_context = frame[4] and frame[4][0].strip()
if not (code_context and code_context.startswith("class ")):
return []
_, parents = code_context.split("(", 1)
parents, _ = parents.rsplit(")", 1)
return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
@classmethod
def input_as_callable(cls, input):
if callable(input):
return lambda: cls.check_input_values(input())
input_values = cls.check_input_values(input)
return lambda: input_values
@classmethod
def check_input_values(cls, input_values):
if not hasattr(input_values, "__iter__"):
            raise ValueError("expected iterable input; got %r" %(input_values, ))
return input_values
@classmethod
def expand(cls, input):
""" A "brute force" method of parameterizing test cases. Creates new
test cases and injects them into the namespace that the wrapped
function is being defined in. Useful for parameterizing tests in
            subclasses of 'TestCase', where Nose test generators don't work.
>>> @parameterized.expand([("foo", 1, 2)])
... def test_add1(name, input, expected):
... actual = add1(input)
... assert_equal(actual, expected)
...
>>> locals()
... 'test_add1_foo_0': <function ...> ...
>>>
"""
def parameterized_expand_wrapper(f):
stack = inspect.stack()
frame = stack[1]
frame_locals = frame[0].f_locals
base_name = f.__name__
get_input = cls.input_as_callable(input)
for num, args in enumerate(get_input()):
p = param.from_decorator(args)
name_suffix = "_%s" %(num, )
if len(p.args) > 0 and isinstance(p.args[0], basestring):
name_suffix += "_" + cls.to_safe_name(p.args[0])
name = base_name + name_suffix
frame_locals[name] = cls.param_as_standalone_func(p, f, name)
return nottest(f)
return parameterized_expand_wrapper
@classmethod
def param_as_standalone_func(cls, p, func, name):
standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
standalone_func.__name__ = name
return standalone_func
@classmethod
def to_safe_name(cls, s):
return str(re.sub("[^a-zA-Z0-9_]", "", s))
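# Hypothetical usage sketch: ``parameterized.expand`` inside a TestCase
# subclass (the plain ``@parameterized`` decorator refuses to run there, see
# assert_not_in_testcase_subclass).  Under nose, the generated methods get the
# suffixes built above, e.g. test_square_0_two and test_square_1_three.
class _SquareTests(TestCase):
    @parameterized.expand([
        param("two", 2, 4),
        param("three", 3, 9),
    ])
    def test_square(self, label, value, expected):
        self.assertEqual(value * value, expected)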
|
|
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import Table, Column, Integer, String, Boolean, Float, DateTime, ForeignKey, Text, Index
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
Base = declarative_base()
# This table allows us to make a many-to-many relationship
# between the targets table and the sessions table
target_association_table = Table(
'target_session_association',
Base.metadata,
Column('target_id', Integer, ForeignKey('targets.id')),
Column('session_id', Integer, ForeignKey('sessions.id'))
)
Index('target_id_idx', target_association_table.c.target_id, postgresql_using='btree')
class Session(Base):
__tablename__ = "sessions"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True)
active = Column(Boolean, default=False)
targets = relationship("Target", secondary=target_association_table, backref="sessions")
class Target(Base):
__tablename__ = "targets"
id = Column(Integer, primary_key=True, autoincrement=True)
target_url = Column(String, unique=True)
host_ip = Column(String)
port_number = Column(String)
url_scheme = Column(String)
    alternative_ips = Column(String, nullable=True)  # Comma separated
host_name = Column(String)
host_path = Column(String)
ip_url = Column(String)
top_domain = Column(String)
top_url = Column(String)
scope = Column(Boolean, default=True)
transactions = relationship("Transaction", cascade="delete")
poutputs = relationship("PluginOutput", cascade="delete")
urls = relationship("Url", cascade="delete")
commands = relationship("Command", cascade="delete")
    # Also has a "sessions" attribute, created by the backref on the
    # Session model's targets relationship
works = relationship("Work", backref="target", cascade="delete")
@hybrid_property
def max_user_rank(self):
user_ranks = [-1]
user_ranks += [poutput.user_rank for poutput in self.poutputs]
return(max(user_ranks))
@hybrid_property
def max_owtf_rank(self):
owtf_ranks = [-1]
owtf_ranks += [poutput.owtf_rank for poutput in self.poutputs]
return(max(owtf_ranks))
def __repr__(self):
return "<Target (url='%s')>" % (self.target_url)
# This table actually allows us to make a many to many relationship
# between transactions table and grep_outputs table
transaction_association_table = Table(
'transaction_grep_association',
Base.metadata,
Column('transaction_id', Integer, ForeignKey('transactions.id')),
Column('grep_output_id', Integer, ForeignKey('grep_outputs.id'))
)
Index('transaction_id_idx', transaction_association_table.c.transaction_id, postgresql_using='btree')
class Transaction(Base):
__tablename__ = "transactions"
target_id = Column(Integer, ForeignKey("targets.id"))
id = Column(Integer, primary_key=True)
url = Column(String)
scope = Column(Boolean, default=False)
method = Column(String)
data = Column(String, nullable=True) # Post DATA
time = Column(Float(precision=10))
time_human = Column(String)
local_timestamp = Column(DateTime)
raw_request = Column(Text)
response_status = Column(String)
response_headers = Column(Text)
response_size = Column(Integer, nullable=True)
response_body = Column(Text, nullable=True)
binary_response = Column(Boolean, nullable=True)
session_tokens = Column(String, nullable=True)
login = Column(Boolean, nullable=True)
logout = Column(Boolean, nullable=True)
grep_outputs = relationship(
"GrepOutput",
secondary=transaction_association_table,
cascade="delete",
backref="transactions"
)
def __repr__(self):
return "<HTTP Transaction (url='%s' method='%s' response_status='%s')>" % (self.url, self.method,
self.response_status)
class GrepOutput(Base):
__tablename__ = "grep_outputs"
target_id = Column(Integer, ForeignKey("targets.id"))
id = Column(Integer, primary_key=True)
name = Column(String)
output = Column(Text)
# Also has a column transactions, which is added by
# using backref in transaction
__table_args__ = (UniqueConstraint('name', 'output', target_id),)
class Url(Base):
__tablename__ = "urls"
target_id = Column(Integer, ForeignKey("targets.id"))
url = Column(String, primary_key=True)
visited = Column(Boolean, default=False)
scope = Column(Boolean, default=True)
def __repr__(self):
return "<URL (url='%s')>" % (self.url)
class PluginOutput(Base):
__tablename__ = "plugin_outputs"
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
# There is a column named plugin which is caused by backref from the plugin class
id = Column(Integer, primary_key=True)
plugin_code = Column(String) # OWTF Code
plugin_group = Column(String)
plugin_type = Column(String)
    date_time = Column(DateTime, default=datetime.datetime.now)  # callable, evaluated per insert
start_time = Column(DateTime)
end_time = Column(DateTime)
output = Column(String, nullable=True)
error = Column(String, nullable=True)
status = Column(String, nullable=True)
user_notes = Column(String, nullable=True)
user_rank = Column(Integer, nullable=True, default=-1)
owtf_rank = Column(Integer, nullable=True, default=-1)
output_path = Column(String, nullable=True)
@hybrid_property
def run_time(self):
return self.end_time - self.start_time
__table_args__ = (UniqueConstraint('plugin_key', 'target_id'),)
class Command(Base):
__tablename__ = "command_register"
start_time = Column(DateTime)
end_time = Column(DateTime)
success = Column(Boolean, default=False)
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
modified_command = Column(String)
original_command = Column(String, primary_key=True)
@hybrid_property
def run_time(self):
return self.end_time - self.start_time
class Error(Base):
__tablename__ = "errors"
id = Column(Integer, primary_key=True)
owtf_message = Column(String)
traceback = Column(String, nullable=True)
user_message = Column(String, nullable=True)
reported = Column(Boolean, default=False)
def __repr__(self):
return "<Error (traceback='%s')>" % (self.traceback)
class Resource(Base):
__tablename__ = "resources"
id = Column(Integer, primary_key=True)
dirty = Column(Boolean, default=False) # Dirty if user edited it. Useful while updating
resource_name = Column(String)
resource_type = Column(String)
resource = Column(String)
__table_args__ = (UniqueConstraint('resource', 'resource_type', 'resource_name'),)
class ConfigSetting(Base):
__tablename__ = "configuration"
key = Column(String, primary_key=True)
value = Column(String)
section = Column(String)
descrip = Column(String, nullable=True)
dirty = Column(Boolean, default=False)
def __repr__(self):
return "<ConfigSetting (key='%s', value='%s', dirty='%r')>" % (self.key, self.value, self.dirty)
class TestGroup(Base):
__tablename__ = "test_groups"
code = Column(String, primary_key=True)
group = Column(String) # web, network
descrip = Column(String)
hint = Column(String, nullable=True)
url = Column(String)
priority = Column(Integer)
plugins = relationship("Plugin")
class Plugin(Base):
__tablename__ = "plugins"
key = Column(String, primary_key=True) # key = type@code
title = Column(String)
name = Column(String)
code = Column(String, ForeignKey("test_groups.code"))
group = Column(String)
type = Column(String)
descrip = Column(String, nullable=True)
file = Column(String)
attr = Column(String, nullable=True)
works = relationship("Work", backref="plugin", cascade="delete")
outputs = relationship("PluginOutput", backref="plugin")
def __repr__(self):
return "<Plugin (code='%s', group='%s', type='%s')>" % (self.code, self.group, self.type)
@hybrid_property
def min_time(self):
"""
        Consider only the last 5 runs, for better performance and accuracy
"""
poutputs_num = len(self.outputs)
if poutputs_num != 0:
if poutputs_num < 5:
run_times = [poutput.run_time for poutput in self.outputs]
else:
run_times = [poutput.run_time for poutput in self.outputs[-5:]]
return min(run_times)
else:
return None
@hybrid_property
def max_time(self):
"""
        Consider only the last 5 runs, for better performance and accuracy
"""
poutputs_num = len(self.outputs)
if poutputs_num != 0:
if poutputs_num < 5:
run_times = [poutput.run_time for poutput in self.outputs]
else:
run_times = [poutput.run_time for poutput in self.outputs[-5:]]
return max(run_times)
else:
return None
__table_args__ = (UniqueConstraint('type', 'code'),)
class Work(Base):
__tablename__ = "worklist"
id = Column(Integer, primary_key=True, autoincrement=True)
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
active = Column(Boolean, default=True)
# Columns plugin and target are created using backrefs
__table_args__ = (UniqueConstraint('target_id', 'plugin_key'),)
def __repr__(self):
return "<Work (target='%s', plugin='%s')>" % (self.target_id, self.plugin_key)
class Mapping(Base):
__tablename__ = 'mappings'
owtf_code = Column(String, primary_key=True)
mappings = Column(String)
category = Column(String, nullable=True)
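# Hypothetical usage sketch: build the schema in an in-memory SQLite database
# and store one target inside a session.  ``create_engine``/``sessionmaker``
# are standard SQLAlchemy; the URL and column values are illustrative only.
def _demo_schema():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    db = sessionmaker(bind=engine)()
    target = Target(target_url='http://example.com', host_ip='93.184.216.34',
                    port_number='80', url_scheme='http', host_name='example.com')
    db.add(Session(name='demo-session', targets=[target]))
    db.commit()
    return db.query(Target).filter_by(scope=True).count()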
|
|
import re
import json
import yaml
from random import randint
from invoker import ReplyObject, Command
from data.pokedex import Pokedex
from plugins.pasteImporter import PasteImporter
from .battle import Battle, Pokemon
from .battleLogic import getAction, getSwitch, getLead
# This currently only works in singles and not doubles / triples
class BattleHandler:
@staticmethod
    def PSPackTeam(importable):
        """This method converts a PS importable to the packed format that PS uses
        to send teams to the server.
Args:
importable: string, the importable that should be converted to packed.
Returns:
String with the packed team.
Raises:
None.
"""
#TODO
return ''
def __init__(self, ws, name):
self.ws = ws
self.botName = name
self.ladderFormat = False
self.teams = {}
self.activeBattles = {}
self.supportedFormats = ['battlefactory',
'gen7challengecup1v1',
'gen7hackmonscup',
'gen7randombattle',
'gen8challengecup1v1',
'gen8challengecup',
'gen8hackmonscup',
'gen8randombattle']
try:
with open('plugins/battling/teams.yaml', 'r') as file:
                self.teams = yaml.safe_load(file)
if self.teams is None:
self.teams = {}
for meta in self.teams:
self.supportedFormats.append(meta)
        except (IOError, yaml.YAMLError):
            # No teams.yaml file exists (or it is invalid), so create an empty one
with open('plugins/battling/teams.yaml', 'w+') as file:
yaml.dump(self.teams, file, default_flow_style = False, explicit_start = True)
def send(self, msg):
#print(msg) # uncomment if debugging
self.ws.send(msg)
def respond(self, battle, msg):
self.send('{room}|{msg}'.format(room = battle, msg = msg))
def newBattle(self, name):
self.activeBattles[name] = Battle(name)
def lead(self, battle, poke, rqid):
self.send('{room}|/team {mon}|{rqid}'.format(room = battle, mon = poke, rqid = rqid))
def act(self, battle, action, move, rqid):
self.send('{room}|/choose {act} {move}|{rqid}'.format(room = battle, act = action, move = str(move), rqid = rqid))
def makeMove(self, battle):
try:
action, actionType = getAction(battle, battle.name.split('-')[1])
self.act(battle.name, actionType, action, battle.rqid)
        # There are a lot of very quiet bugs in the BattleLogic.getAction code,
# so catch all the exceptions to get information about them.
except Exception as e:
import traceback
print('{}: {}'.format(type(e).__name__, e))
traceback.print_tb(e.__traceback__)
def handleOutcome(self, battle, won):
if won:
self.respond(battle.name, 'O-oh, I won?')
else:
self.respond(battle.name, 'I guess that was expected...')
print('Battle: {outcome} against {opponent}'.format(outcome = 'Won' if won else 'Lost', opponent = battle.other.name))
def getRandomTeam(self, metagame):
try:
teamCount = len(self.teams[metagame])
return self.teams[metagame][randint(0, teamCount - 1)]
        except (KeyError, ValueError, IndexError):
# No valid team for this format. It shouldn't happen but just in case
return ''
def setLadderFormat(self, format):
'''Sets the format used for laddering.
Args:
format: string, the format that is going to be laddered in.
Returns:
Bool: True if setting the team was successful, False otherwise.
Raises:
None.
'''
if not format in self.teams: return False
self.ladderFormat = format
return True
def clearLadderFormat(self):
self.ladderFormat = False
def getSpecies(self, details):
pokemon = details.split(',')[0].replace('-*', '')
if pokemon in Pokedex: return pokemon
pokemon = pokemon.split('-')[0]
return pokemon
def init(robot, room, roomtype):
if roomtype == 'battle': robot.bh.newBattle(room.title)
def title(robot, room, title):
if robot.name in title:
print('Battle: New battle between {}'.format(title))
def deinit(robot, room, *extra):
handler = robot.bh
battle = handler.activeBattles.pop(room.title)
if handler.ladderFormat and battle.ladderGame:
# Look for a new battle since the last one ended
robot.send('|/utm {}'.format(handler.getRandomTeam(handler.ladderFormat)))
robot.send('|/search {}'.format(handler.ladderFormat))
# Decorator for all the battle protocol functions
def battleprotocol(func):
def wrapper(robot, room, *params):
battle = robot.bh.activeBattles[room.title] if room.title in robot.bh.activeBattles else None
if not battle or battle.spectating: return
func(robot, robot.bh, battle, *params)
return wrapper
@battleprotocol
def request(robot, bh, battle, data):
try:
        # This is where all the battle picking happens
request = json.loads(data)
except ValueError as e:
return e
if 'rqid' in request:
battle.rqid = request['rqid']
sidedata = request['side']
teamSlot = 1
for poke in sidedata['pokemon']:
battle.me.updateTeam(
Pokemon(bh.getSpecies(poke['details']),poke['details'],poke['condition'],poke['active'],
poke['stats'],poke['moves'],poke['baseAbility'],poke['item'], False, teamSlot, battle.me))
teamSlot += 1
if 'active' in request:
battle.myActiveData = request['active']
for pokemon in request['side']['pokemon']:
if pokemon['active']:
battle.me.setActive(battle.me.getPokemon(bh.getSpecies(pokemon['details'])))
if 'canMegaEvo' in request['active'][0]:
battle.me.active.canMega = battle.me.canMegaPokemon
if 'canUltraBurst' in request['active'][0]:
battle.me.active.canUltraBurst = battle.me.canUltraBurst
if 'forceSwitch' in request and request['forceSwitch'][0]:
bh.act(battle.name, 'switch', getSwitch(battle.me, battle.me.active, battle.other.active), battle.rqid)
@battleprotocol
def rated(robot, bh, battle, rating):
if not rating.startswith('Tournament'):
battle.isLadderMatch()
@battleprotocol
def rule(robot, bh, battle, rule):
if rule.startswith('Species Clause') or rule.startswith('Endless Battle Clause'):
battle.isNotHackmons()
if rule.startswith('Dynamax Clause'):
battle.dynamaxAllowed(False)
@battleprotocol
def generation(robot, bh, battle, gen):
battle.generation = int(gen)
if battle.generation < 8:
battle.dynamaxAllowed(False)
@battleprotocol
def pokemon(robot, bh, battle, id, pokemon, item = ''):
if not battle.me.id == id:
species = bh.getSpecies(pokemon)
stats = {'atk':1,'def':1,'spa':1,'spd':1,'spe':1}
moves = ['','','','']
hasMega = True if 'hasMega' in Pokedex[species] else False
battle.other.updateTeam(
Pokemon(
species, pokemon, '100/100', False,
stats, moves, Pokedex[species]['abilities']['0'],
'', hasMega, len(battle.other.team) + 1, battle.other))
@battleprotocol
def player(robot, bh, battle, pid, name, avatar = '', *rest):
if name == robot.name:
battle.setMe(name, pid)
bh.respond(battle.name, '/timer on')
else:
battle.setOther(name, pid)
@battleprotocol
def teampreview(robot, bh, battle, *args):
if not battle.me.id:
battle.spectating = True
else:
poke = getLead(battle.me.team, battle.other.team)
bh.lead(battle.name, poke, battle.rqid)
@battleprotocol
def battlestart(robot, bh, battle, *args):
# Reality check for non-teampreview gens
if not battle.me.id:
battle.spectating = True
@battleprotocol
def turn(robot, bh, battle, number):
bh.makeMove(battle)
@battleprotocol
def switch(robot, bh, battle, pid, details, hpstatus, cause = ''):
if pid.startswith(battle.me.id):
lastActive = battle.me.active
if lastActive:
lastActive.dynamax = False
newActive = battle.me.getPokemon(bh.getSpecies(details))
battle.me.setActive(newActive)
battle.me.changeTeamSlot(lastActive, battle.me.active)
else:
if battle.other.active:
battle.other.active.dynamax = False
mon = bh.getSpecies(details)
if mon not in battle.other.team:
battle.other.updateTeam(Pokemon(bh.getSpecies(details), details, '100/100', False,
{'atk':1,'def':1,'spa':1,'spd':1,'spe':1}, ['','','',''], '', '',
False, len(battle.other.team) + 1, battle.other))
battle.other.setActive(battle.other.getPokemon(mon))
@battleprotocol
def end(robot, bh, battle, winner):
bh.handleOutcome(battle, winner == robot.name)
bh.respond(battle.name, '/leave')
@battleprotocol
def tie(robot, bh, battle):
bh.handleOutcome(battle, False)
bh.respond(battle.name, '/leave')
# |-start|POKEMON|EFFECT
@battleprotocol
def start(robot, bh, battle, pid, effect, *rest):
if effect == 'Dynamax':
side = battle.me if pid.startswith(battle.me.id) else battle.other
side.active.dynamaxed = True
side.canDynamax = False
# |-end|POKEMON|EFFECT
@battleprotocol
def endEffect(robot, bh, battle, pid, effect, *rest):
if effect == 'Dynamax':
side = battle.me if pid.startswith(battle.me.id) else battle.other
side.active.dynamaxed = False
# |-mega|POKEMON|SPECIES|MEGASTONE
@battleprotocol
def mega(robot, bh, battle, pid, pokemon, megastone):
megapoke = pokemon + '-Mega'
if pokemon in ['Charizard', 'Mewtwo']:
megapoke += '-' + megastone.split()[1]
side = battle.me if pid.startswith(battle.me.id) else battle.other
side.removeBaseForm(pokemon, megapoke)
side.canMegaPokemon = False
side.active.canMega = False
# |-burst|POKEMON|SPECIES|ITEM
@battleprotocol
def burst(robot, bh, battle, pid, pokemon, stone):
ultraburst = 'Necrozma-Ultra'
side = battle.me if pid.startswith(battle.me.id) else battle.other
# Find the necrozma (works for species clause metas only!)
for p in side.team:
if p.startswith(pokemon):
pokemon = p
side.removeBaseForm(pokemon, ultraburst)
side.canUltraBurst = False
side.active.canUltraBurst = False
# |-primal|POKEMON|SPECIES|MEGASTONE
@battleprotocol
def primal(robot, bh, battle, pid, pokemon='', megastone=''):
primalpoke = pokemon + '-Primal'
side = battle.me if pid.startswith(battle.me.id) else battle.other
side.removeBaseForm(pokemon, primalpoke)
@battleprotocol
def formechange(robot, bh, battle, pid, forme, *rest):
if forme.endswith('Gmax'):
side = battle.me if pid.startswith(battle.me.id) else battle.other
side.active.dynamaxed = 'gmax'
side.canDynamax = False
@battleprotocol
def zmove(robot, bh, battle, pid):
if pid.startswith(battle.me.id):
battle.me.usedZmove()
else:
battle.other.usedZmove()
@battleprotocol
def move(robot, bh, battle, pid, usedmove, target, modifier = '', animation = ''):
moveid = robot.toId(usedmove)
if not pid.startswith(battle.me.id):
if moveid not in battle.other.active.moves:
battle.other.active.moves.append(moveid)
battle.other.active.markLastUsedMove(moveid)
else:
battle.me.active.markLastUsedMove(moveid)
@battleprotocol
def boost(robot, bh, battle, pid, stat, amount, modifier = ''):
if pid.startswith(battle.me.id):
battle.me.active.boosts[stat] += int(amount)
else:
battle.other.active.boosts[stat] += int(amount)
@battleprotocol
def unboost(robot, bh, battle, pid, stat, amount, modifier = ''):
if pid.startswith(battle.me.id):
battle.me.active.boosts[stat] -= int(amount)
else:
battle.other.active.boosts[stat] -= int(amount)
@battleprotocol
def heal(robot, bh, battle, pid, hpstatus, by = '', help = ''):
hp, status = (hpstatus.split() + [''])[:2] # Always get 2 values back from the split operation
if not pid.startswith(battle.me.id):
battle.other.active.setCondition(hp, status)
        if by.startswith('[from]'):
thing = by[len('[from] '):]
if 'item:' in thing:
battle.other.active.item = thing[len('item: '):]
elif 'ability' in thing:
pass
@battleprotocol
def status(robot, bh, battle, pid, condition, cause = '', of = ''):
if not pid.startswith(battle.me.id):
battle.other.active.setCondition(battle.other.active.condition, condition)
@battleprotocol
def faint(robot, bh, battle, pid):
if not pid.startswith(battle.me.id):
battle.other.active.setCondition('0', 'fnt')
@battleprotocol
def error(robot, bh, battle, cause, *information):
if '[Invalid choice]' == cause:
battle.me.active.trapped = True
# Only reason for an invalid choice should be because we're trapped...
trappingAbilities = ['Shadow Tag', 'Arena Trap', 'Magnet Pull']
otherActiveAbilities = Pokedex[battle.other.active.species]['abilities']
for option in otherActiveAbilities:
if otherActiveAbilities[option] in trappingAbilities:
battle.other.active.ability = otherActiveAbilities[option]
print('{battle}| {active} got invalid choice, trying something else'.format(battle = battle, active = battle.me.active.species))
bh.makeMove(battle) # Try again
def startLaddering(bot, cmd, msg, user):
reply = ReplyObject('', reply = True)
if not user.isOwner: return reply.response('Only owner is allowed to do this.')
if bot.toId(msg) == 'false':
bot.bh.clearLadderFormat()
return reply.response('Stopped laddering.')
if not bot.bh.setLadderFormat(msg): return reply.response('Starting to ladder failed, no valid teams for format: {}.'.format(msg))
# Now that we know that we have valid teams for laddering, and the settings
# to restart after finishing a game are set, we can now begin.
# Note: To ladder in formats with random teams, add an empty string to that format in teams.yaml.
bot.send('|/utm {}'.format(bot.bh.getRandomTeam(bot.bh.ladderFormat)))
bot.send('|/search {}'.format(bot.bh.ladderFormat))
return reply.response('Started laddering in format: {}'.format(bot.bh.ladderFormat))
def acceptTeam(bot, cmd, msg):
reply = ReplyObject('', reply = True, broadcast = True)
meta, team = msg.replace(' ', '').split(',')
if not team: return reply.response('You forgot a team')
# Resolve links to teams
if team.startswith('http'):
team = PasteImporter.getPasteContent(team)
if not team:
return reply.response('Unsupported paste type (probably)')
# If the pasted team was an importable instead of packed, pack it
if '|' not in team:
team = BattleHandler.PSPackTeam(team)
# Double check so it actually is packed
if '|' not in team: return reply.response("This team doesn't look like a valid packed team :(")
meta = bot.toId(meta)
if not meta in bot.bh.teams:
bot.bh.teams[meta] = []
if not team in bot.bh.teams[meta]:
bot.bh.teams[meta].append(team)
else:
return reply.response('I already have that team! :D')
if not meta in bot.bh.supportedFormats:
bot.bh.supportedFormats.append(meta)
with open('plugins/battling/teams.yaml', 'w+') as file:
yaml.dump(bot.bh.teams, file, default_flow_style = False, explicit_start = True)
return reply.response('Saved that team for you so that I can play with it :)')
# Exports
handlers = {
'init': init,
'title': title,
'start': battlestart,
'deinit': deinit,
'rated': rated,
'gen': generation,
'request': request,
'rule': rule,
'poke': pokemon,
'player': player,
'teampreview': teampreview,
'turn': turn,
'switch': switch,
'win': end,
'tie': tie,
# In-battle events
# Most of these events just keep track of how the game is progressing
# as a lot of information about the own team is sent by the request for action
'-mega': mega,
'-burst': burst,
'-primal': primal,
'-zmove': zmove,
'-zpower': zmove,
# Dynamaxing goes by -start and -end events
# Other volatile statuses (confusion, taunt, substitute, etc.) also use this
'-start': start,
'-end': endEffect,
# Gmaxing currently only thing handled here
'-formechange': formechange,
# This keeps track of what moves the opponent has revealed and the last used move from either side
'move': move,
'-boost': boost,
'-unboost': unboost,
# Because of how they're treated, taking damage and healing are the same thing
'-heal': heal,
'-damage': heal,
'-status': status,
'-faint': faint,
'error': error
}
commands = [
Command(['storeteam'], acceptTeam),
Command(['ladder'], startLaddering)
]
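# Hypothetical dispatch sketch: how a raw showdown protocol line could be
# routed through the handlers table above.  ``robot`` and ``room`` stand in
# for the bot framework objects that normally invoke these functions.
def _dispatch_line(robot, room, line):
    parts = line.split('|')[1:]  # protocol lines look like |msgtype|arg|arg|...
    if not parts:
        return
    msgtype, params = parts[0], parts[1:]
    handler = handlers.get(msgtype)
    if handler:
        handler(robot, room, *params)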
|
|
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library handling DevTools websocket interaction.
"""
import httplib
import json
import logging
import os
import sys
file_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(file_dir, '..', '..', 'perf'))
from chrome_telemetry_build import chromium_config
sys.path.append(chromium_config.GetTelemetryDir())
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import websocket
import common_util
DEFAULT_TIMEOUT_SECONDS = 10 # seconds
class DevToolsConnectionException(Exception):
def __init__(self, message):
super(DevToolsConnectionException, self).__init__(message)
logging.warning("DevToolsConnectionException: " + message)
# Taken from telemetry.internal.backends.chrome_inspector.tracing_backend.
# TODO(mattcary): combine this with the above and export?
class _StreamReader(object):
def __init__(self, inspector, stream_handle):
self._inspector_websocket = inspector
self._handle = stream_handle
self._callback = None
self._data = None
def Read(self, callback):
# Do not allow the instance of this class to be reused, as
# we only read data sequentially at the moment, so a stream
# can only be read once.
assert not self._callback
self._data = []
self._callback = callback
self._ReadChunkFromStream()
# Queue one extra read ahead to avoid latency.
self._ReadChunkFromStream()
def _ReadChunkFromStream(self):
# Limit max block size to avoid fragmenting memory in sock.recv(),
# (see https://github.com/liris/websocket-client/issues/163 for details)
req = {'method': 'IO.read', 'params': {
'handle': self._handle, 'size': 32768}}
self._inspector_websocket.AsyncRequest(req, self._GotChunkFromStream)
def _GotChunkFromStream(self, response):
# Quietly discard responses from reads queued ahead after EOF.
if self._data is None:
return
if 'error' in response:
raise DevToolsConnectionException(
'Reading trace failed: %s' % response['error']['message'])
result = response['result']
self._data.append(result['data'])
if not result.get('eof', False):
self._ReadChunkFromStream()
return
req = {'method': 'IO.close', 'params': {'handle': self._handle}}
self._inspector_websocket.SendAndIgnoreResponse(req)
trace_string = ''.join(self._data)
self._data = None
self._callback(trace_string)
class DevToolsConnection(object):
"""Handles the communication with a DevTools server.
"""
TRACING_DOMAIN = 'Tracing'
TRACING_END_METHOD = 'Tracing.end'
TRACING_DATA_METHOD = 'Tracing.dataCollected'
TRACING_DONE_EVENT = 'Tracing.tracingComplete'
TRACING_STREAM_EVENT = 'Tracing.tracingComplete' # Same as TRACING_DONE.
TRACING_TIMEOUT = 300
def __init__(self, hostname, port):
"""Initializes the connection with a DevTools server.
Args:
hostname: server hostname.
port: port number.
"""
self._http_hostname = hostname
self._http_port = port
self._event_listeners = {}
self._domain_listeners = {}
self._scoped_states = {}
self._domains_to_enable = set()
self._tearing_down_tracing = False
self._please_stop = False
self._ws = None
self._target_descriptor = None
self._Connect()
def RegisterListener(self, name, listener):
"""Registers a listener for an event.
Also takes care of enabling the relevant domain before starting monitoring.
Args:
name: (str) Domain or event the listener wants to listen to, e.g.
"Network.requestWillBeSent" or "Tracing".
listener: (Listener) listener instance.
"""
if '.' in name:
domain = name[:name.index('.')]
self._event_listeners[name] = listener
else:
domain = name
self._domain_listeners[domain] = listener
self._domains_to_enable.add(domain)
def UnregisterListener(self, listener):
"""Unregisters a listener.
Args:
listener: (Listener) listener to unregister.
"""
    keys = ([k for k, l in self._event_listeners.items() if l is listener] +
            [k for k, l in self._domain_listeners.items() if l is listener])
assert keys, "Removing non-existent listener"
for key in keys:
if key in self._event_listeners:
del(self._event_listeners[key])
if key in self._domain_listeners:
del(self._domain_listeners[key])
def SetScopedState(self, method, params, default_params, enable_domain):
"""Changes state at the beginning the monitoring and resets it at the end.
|method| is called with |params| at the beginning of the monitoring. After
the monitoring completes, the state is reset by calling |method| with
|default_params|.
Args:
method: (str) Method.
params: (dict) Parameters to set when the monitoring starts.
default_params: (dict) Parameters to reset the state at the end.
enable_domain: (bool) True if enabling the domain is required.
"""
if enable_domain:
if '.' in method:
domain = method[:method.index('.')]
assert domain, 'No valid domain'
self._domains_to_enable.add(domain)
scoped_state_value = (params, default_params)
    if method in self._scoped_states:
assert self._scoped_states[method] == scoped_state_value
else:
self._scoped_states[method] = scoped_state_value
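  # Illustrative example (an assumption, not part of this module): throttle the
  # network for the duration of the monitoring, then reset it afterwards. The
  # Network.emulateNetworkConditions parameter values below are guesses.
  #
  #   connection.SetScopedState(
  #       'Network.emulateNetworkConditions',
  #       {'offline': False, 'latency': 200,
  #        'downloadThroughput': 512 * 1024, 'uploadThroughput': 256 * 1024},
  #       {'offline': False, 'latency': 0,
  #        'downloadThroughput': -1, 'uploadThroughput': -1},
  #       enable_domain=True)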
def SyncRequest(self, method, params=None):
"""Issues a synchronous request to the DevTools server.
Args:
method: (str) Method.
params: (dict) Optional parameters to the request.
Returns:
The answer.
"""
request = {'method': method}
if params:
request['params'] = params
return self._ws.SyncRequest(request)
def SendAndIgnoreResponse(self, method, params=None):
"""Issues a request to the DevTools server, do not wait for the response.
Args:
method: (str) Method.
params: (dict) Optional parameters to the request.
"""
request = {'method': method}
if params:
request['params'] = params
self._ws.SendAndIgnoreResponse(request)
def SyncRequestNoResponse(self, method, params=None):
"""As SyncRequest, but asserts that no meaningful response was received.
Args:
method: (str) Method.
params: (dict) Optional parameters to the request.
"""
result = self.SyncRequest(method, params)
if 'error' in result or ('result' in result and
result['result']):
raise DevToolsConnectionException(
'Unexpected response for %s: %s' % (method, result))
def ClearCache(self):
"""Clears buffer cache.
Will assert that the browser supports cache clearing.
"""
res = self.SyncRequest('Network.canClearBrowserCache')
assert res['result'], 'Cache clearing is not supported by this browser.'
self.SyncRequest('Network.clearBrowserCache')
def MonitorUrl(self, url, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
"""Navigate to url and dispatch monitoring loop.
    Unless you have registered a listener that will call StopMonitoring, this
    will run until the DevTools connection times out.
Args:
      url: (str) a URL to navigate to before starting the monitoring loop.
timeout_seconds: timeout in seconds for monitoring loop.
"""
for domain in self._domains_to_enable:
self._ws.RegisterDomain(domain, self._OnDataReceived)
if domain != self.TRACING_DOMAIN:
self.SyncRequestNoResponse('%s.enable' % domain)
# Tracing setup must be done by the tracing track to control filtering
# and output.
for scoped_state in self._scoped_states:
self.SyncRequestNoResponse(scoped_state,
self._scoped_states[scoped_state][0])
self._tearing_down_tracing = False
self.SendAndIgnoreResponse('Page.navigate', {'url': url})
self._Dispatch(timeout=timeout_seconds)
self._TearDownMonitoring()
def StopMonitoring(self):
"""Stops the monitoring."""
self._please_stop = True
def ExecuteJavaScript(self, expression):
"""Run JavaScript expression.
Args:
expression: JavaScript expression to run.
Returns:
The return value from the JavaScript expression.
"""
response = self.SyncRequest('Runtime.evaluate', {
'expression': expression,
'returnByValue': True})
if 'error' in response:
raise Exception(response['error']['message'])
if 'wasThrown' in response['result'] and response['result']['wasThrown']:
      raise Exception(response['result']['result']['description'])
if response['result']['result']['type'] == 'undefined':
return None
return response['result']['result']['value']
def PollForJavaScriptExpression(self, expression, interval):
"""Wait until JavaScript expression is true.
Args:
expression: JavaScript expression to run.
interval: Period between expression evaluation in seconds.
"""
common_util.PollFor(lambda: bool(self.ExecuteJavaScript(expression)),
'JavaScript: {}'.format(expression),
interval)
def Close(self):
"""Cleanly close chrome by closing the only tab."""
assert self._ws
response = self._HttpRequest('/close/' + self._target_descriptor['id'])
assert response == 'Target is closing'
self._ws = None
def _Dispatch(self, timeout, kind='Monitoring'):
self._please_stop = False
while not self._please_stop:
try:
self._ws.DispatchNotifications(timeout=timeout)
except websocket.WebSocketTimeoutException:
break
if not self._please_stop:
logging.warning('%s stopped on a timeout.' % kind)
def _TearDownMonitoring(self):
if self.TRACING_DOMAIN in self._domains_to_enable:
logging.info('Fetching tracing')
self.SyncRequestNoResponse(self.TRACING_END_METHOD)
self._tearing_down_tracing = True
self._Dispatch(timeout=self.TRACING_TIMEOUT, kind='Tracing')
for scoped_state in self._scoped_states:
self.SyncRequestNoResponse(scoped_state,
self._scoped_states[scoped_state][1])
for domain in self._domains_to_enable:
if domain != self.TRACING_DOMAIN:
self.SyncRequest('%s.disable' % domain)
self._ws.UnregisterDomain(domain)
self._domains_to_enable.clear()
self._domain_listeners.clear()
self._event_listeners.clear()
self._scoped_states.clear()
def _OnDataReceived(self, msg):
if 'method' not in msg:
raise DevToolsConnectionException('Malformed message: %s' % msg)
method = msg['method']
domain = method[:method.index('.')]
if self._tearing_down_tracing and method == self.TRACING_STREAM_EVENT:
stream_handle = msg.get('params', {}).get('stream')
if not stream_handle:
self._tearing_down_tracing = False
self.StopMonitoring()
# Fall through to regular dispatching.
else:
_StreamReader(self._ws, stream_handle).Read(self._TracingStreamDone)
# Skip regular dispatching.
return
if (method not in self._event_listeners and
domain not in self._domain_listeners):
return
if method in self._event_listeners:
self._event_listeners[method].Handle(method, msg)
if domain in self._domain_listeners:
self._domain_listeners[domain].Handle(method, msg)
if self._tearing_down_tracing and method == self.TRACING_DONE_EVENT:
self._tearing_down_tracing = False
self.StopMonitoring()
def _TracingStreamDone(self, data):
tracing_events = json.loads(data)
for evt in tracing_events:
self._OnDataReceived({'method': self.TRACING_DATA_METHOD,
'params': {'value': [evt]}})
if self._please_stop:
break
self._tearing_down_tracing = False
self.StopMonitoring()
def _HttpRequest(self, path):
assert path[0] == '/'
r = httplib.HTTPConnection(self._http_hostname, self._http_port)
try:
r.request('GET', '/json' + path)
response = r.getresponse()
if response.status != 200:
raise DevToolsConnectionException(
            'Cannot connect to DevTools, response code %d' % response.status)
raw_response = response.read()
finally:
r.close()
return raw_response
def _Connect(self):
assert not self._ws
assert not self._target_descriptor
for target_descriptor in json.loads(self._HttpRequest('/list')):
if target_descriptor['type'] == 'page':
self._target_descriptor = target_descriptor
break
assert self._target_descriptor['url'] == 'about:blank'
self._ws = inspector_websocket.InspectorWebsocket()
self._ws.Connect(self._target_descriptor['webSocketDebuggerUrl'])
class Listener(object):
"""Listens to events forwarded by a DevToolsConnection instance."""
def __init__(self, connection):
"""Initializes a Listener instance.
Args:
connection: (DevToolsConnection).
"""
pass
def Handle(self, method, msg):
"""Handles an event this instance listens for.
Args:
      method: (str) Method or event name, as registered.
      msg: (dict) Complete event message.
"""
pass
class Track(Listener):
"""Collects data from a DevTools server."""
def GetEvents(self):
"""Returns a list of collected events, finalizing the state if necessary."""
pass
def ToJsonDict(self):
"""Serializes to a dictionary, to be dumped as JSON.
Returns:
A dict that can be dumped by the json module, and loaded by
FromJsonDict().
"""
pass
@classmethod
def FromJsonDict(cls, json_dict):
"""Returns a Track instance constructed from data dumped by
Track.ToJsonDict().
Args:
      json_dict: (dict) Parsed from a JSON file using the json module.
Returns:
a Track instance.
"""
pass
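# Example usage (illustrative sketch only; the hostname/port and the concrete
# listener behaviour below are assumptions, not part of this module):
#
#   class RequestLogger(Listener):
#       def Handle(self, method, msg):
#           logging.info('%s %s', method, msg.get('params', {}))
#
#   connection = DevToolsConnection('localhost', 9222)
#   connection.RegisterListener(
#       'Network.requestWillBeSent', RequestLogger(connection))
#   connection.MonitorUrl('http://example.com', timeout_seconds=30)
#   connection.Close()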
|
|
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenFlow1.3 flow table for OFAgent
* requirements
** plain OpenFlow 1.3. no vendor extensions.
* legends
xxx: network id (agent internal use)
yyy: segment id (vlan id, gre key, ...)
a,b,c: tunnel port (tun_ofports, map[net_id].tun_ofports)
i,j,k: vm port (map[net_id].vif_ports[vif_id].ofport)
x,y,z: physical port (int_ofports)
N: tunnel type (0 for TYPE_GRE, 1 for TYPE_xxx, ...)
iii: unknown ip address
uuu: unicast l2 address
* tables (in order)
CHECK_IN_PORT
TUNNEL_IN+N
PHYS_IN
LOCAL_IN
ARP_PASSTHROUGH
ARP_RESPONDER
TUNNEL_OUT
LOCAL_OUT
PHYS_OUT
TUNNEL_FLOOD+N
PHYS_FLOOD
LOCAL_FLOOD
* CHECK_IN_PORT
for each vm ports:
// check_in_port_add_local_port, check_in_port_delete_port
in_port=i, write_metadata(LOCAL|xxx),goto(LOCAL_IN)
TYPE_GRE
for each tunnel ports:
// check_in_port_add_tunnel_port, check_in_port_delete_port
in_port=a, goto(TUNNEL_IN+N)
TYPE_VLAN
        for each network's ports:
// provision_tenant_physnet, reclaim_tenant_physnet
in_port=x,vlan_vid=present|yyy, write_metadata(xxx),goto(PHYS_IN)
TYPE_FLAT
// provision_tenant_physnet, reclaim_tenant_physnet
in_port=x, write_metadata(xxx),goto(PHYS_IN)
default drop
* TUNNEL_IN+N (per tunnel types) tunnel -> network
    for each network:
// provision_tenant_tunnel, reclaim_tenant_tunnel
tun_id=yyy, write_metadata(xxx),goto(TUNNEL_OUT)
default drop
* PHYS_IN
default goto(TUNNEL_OUT)
* LOCAL_IN
default goto(next_table)
* ARP_PASSTHROUGH
for each unknown tpa:
// arp_passthrough
arp,arp_op=request,metadata=xxx,tpa=iii, idle_timeout=5, goto(TUNNEL_OUT)
default goto(next_table)
* ARP_RESPONDER
arp,arp_op=request, output:controller
default goto(next_table)
* TUNNEL_OUT
TYPE_GRE
// !FLOODING_ENTRY
// install_tunnel_output, delete_tunnel_output
metadata=LOCAL|xxx,eth_dst=uuu set_tunnel(yyy),output:a
default goto(next table)
* LOCAL_OUT
    for each known destination:
// local_out_add_port, local_out_delete_port
metadata=xxx,eth_dst=uuu output:i
default goto(next table)
* PHYS_OUT
NOTE(yamamoto): currently this table is always empty.
default goto(next table)
* TUNNEL_FLOOD+N (per tunnel types)
network -> tunnel/vlan
output to tunnel/physical ports
"next table" might be LOCAL_OUT
TYPE_GRE
        for each network:
// FLOODING_ENTRY
// install_tunnel_output, delete_tunnel_output
metadata=LOCAL|xxx, set_tunnel(yyy),output:a,b,c,goto(next table)
default goto(next table)
* PHYS_FLOOD
TYPE_VLAN
        for each network:
// provision_tenant_physnet, reclaim_tenant_physnet
metadata=LOCAL|xxx, push_vlan:0x8100,set_field:present|yyy->vlan_vid,
output:x,pop_vlan,goto(next table)
TYPE_FLAT
        for each network:
// provision_tenant_physnet, reclaim_tenant_physnet
metadata=LOCAL|xxx, output:x,goto(next table)
default goto(next table)
* LOCAL_FLOOD
    for each network:
// local_flood_update, local_flood_delete
metadata=xxx, output:i,j,k
or
metadata=xxx,eth_dst=broadcast, output:i,j,k
default drop
* references
** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
*** we use metadata instead of "internal" VLANs
*** we don't want to use NX learn action
"""
from ryu.lib.packet import arp
from ryu.ofproto import ether
from neutron.plugins.common import constants as p_const
import networking_ofagent.plugins.ofagent.agent.metadata as meta
from networking_ofagent.plugins.ofagent.agent import ofswitch
from networking_ofagent.plugins.ofagent.agent import tables
class OFAgentIntegrationBridge(ofswitch.OpenFlowSwitch):
"""ofagent br-int specific logic."""
def setup_default_table(self):
self.delete_flows()
self.install_default_drop(tables.CHECK_IN_PORT)
for t in tables.TUNNEL_IN.values():
self.install_default_drop(t)
self.install_default_goto(tables.PHYS_IN, tables.TUNNEL_OUT)
self.install_default_goto_next(tables.LOCAL_IN)
self.install_default_goto_next(tables.ARP_PASSTHROUGH)
self.install_arp_responder(tables.ARP_RESPONDER)
self.install_default_goto_next(tables.TUNNEL_OUT)
self.install_default_goto_next(tables.LOCAL_OUT)
self.install_default_goto_next(tables.PHYS_OUT)
for t in tables.TUNNEL_FLOOD.values():
self.install_default_goto_next(t)
self.install_default_goto_next(tables.PHYS_FLOOD)
self.install_default_drop(tables.LOCAL_FLOOD)
def install_arp_responder(self, table_id):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(eth_type=ether.ETH_TYPE_ARP,
arp_op=arp.ARP_REQUEST)
actions = [ofpp.OFPActionOutput(ofp.OFPP_CONTROLLER)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
msg = ofpp.OFPFlowMod(dp,
table_id=table_id,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
self.install_default_goto_next(table_id)
def install_tunnel_output(self, table_id,
network, segmentation_id,
ports, goto_next, **additional_matches):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network, meta.LOCAL),
**additional_matches)
actions = [ofpp.OFPActionSetField(tunnel_id=segmentation_id)]
actions += [ofpp.OFPActionOutput(port=p) for p in ports]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
if goto_next:
instructions += [
ofpp.OFPInstructionGotoTable(table_id=table_id + 1),
]
msg = ofpp.OFPFlowMod(dp,
table_id=table_id,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def delete_tunnel_output(self, table_id,
network, **additional_matches):
(dp, _ofp, ofpp) = self._get_dp()
self.delete_flows(table_id=table_id,
metadata=meta.mk_metadata(network, meta.LOCAL),
**additional_matches)
def provision_tenant_tunnel(self, network_type, network, segmentation_id):
(dp, _ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(tunnel_id=segmentation_id)
metadata = meta.mk_metadata(network)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1]),
ofpp.OFPInstructionGotoTable(table_id=tables.TUNNEL_OUT),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.TUNNEL_IN[network_type],
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def reclaim_tenant_tunnel(self, network_type, network, segmentation_id):
table_id = tables.TUNNEL_IN[network_type]
self.delete_flows(table_id=table_id, tunnel_id=segmentation_id)
def provision_tenant_physnet(self, network_type, network,
segmentation_id, phys_port):
"""for vlan and flat."""
assert(network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT])
(dp, ofp, ofpp) = self._get_dp()
# inbound
metadata = meta.mk_metadata(network)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1])
]
if network_type == p_const.TYPE_VLAN:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
match = ofpp.OFPMatch(in_port=phys_port, vlan_vid=vlan_vid)
actions = [ofpp.OFPActionPopVlan()]
instructions += [ofpp.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
else:
match = ofpp.OFPMatch(in_port=phys_port)
instructions += [ofpp.OFPInstructionGotoTable(table_id=tables.PHYS_IN)]
msg = ofpp.OFPFlowMod(dp,
priority=1,
table_id=tables.CHECK_IN_PORT,
match=match,
instructions=instructions)
self._send_msg(msg)
# outbound
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network, meta.LOCAL))
if network_type == p_const.TYPE_VLAN:
actions = [
ofpp.OFPActionPushVlan(),
ofpp.OFPActionSetField(vlan_vid=vlan_vid),
]
else:
actions = []
actions += [ofpp.OFPActionOutput(port=phys_port)]
if network_type == p_const.TYPE_VLAN:
actions += [ofpp.OFPActionPopVlan()]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=tables.PHYS_FLOOD + 1),
]
msg = ofpp.OFPFlowMod(dp,
priority=1,
table_id=tables.PHYS_FLOOD,
match=match,
instructions=instructions)
self._send_msg(msg)
def reclaim_tenant_physnet(self, network_type, network,
segmentation_id, phys_port):
(_dp, ofp, _ofpp) = self._get_dp()
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
if network_type == p_const.TYPE_VLAN:
self.delete_flows(table_id=tables.CHECK_IN_PORT,
in_port=phys_port, vlan_vid=vlan_vid)
else:
self.delete_flows(table_id=tables.CHECK_IN_PORT,
in_port=phys_port)
self.delete_flows(table_id=tables.PHYS_FLOOD,
metadata=meta.mk_metadata(network))
def check_in_port_add_tunnel_port(self, network_type, port):
(dp, _ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(in_port=port)
instructions = [
ofpp.OFPInstructionGotoTable(
table_id=tables.TUNNEL_IN[network_type])
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.CHECK_IN_PORT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def check_in_port_add_local_port(self, network, port):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(in_port=port)
metadata = meta.mk_metadata(network, meta.LOCAL)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1]),
ofpp.OFPInstructionGotoTable(table_id=tables.LOCAL_IN),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.CHECK_IN_PORT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def check_in_port_delete_port(self, port):
self.delete_flows(table_id=tables.CHECK_IN_PORT, in_port=port)
def local_flood_update(self, network, ports, flood_unicast):
(dp, ofp, ofpp) = self._get_dp()
match_all = ofpp.OFPMatch(metadata=meta.mk_metadata(network))
match_multicast = ofpp.OFPMatch(metadata=meta.mk_metadata(network),
eth_dst=('01:00:00:00:00:00',
'01:00:00:00:00:00'))
if flood_unicast:
match_add = match_all
match_del = match_multicast
else:
match_add = match_multicast
match_del = match_all
actions = [ofpp.OFPActionOutput(port=p) for p in ports]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.LOCAL_FLOOD,
priority=1,
match=match_add,
instructions=instructions)
self._send_msg(msg)
self.delete_flows(table_id=tables.LOCAL_FLOOD, strict=True,
priority=1, match=match_del)
def local_flood_delete(self, network):
self.delete_flows(table_id=tables.LOCAL_FLOOD,
metadata=meta.mk_metadata(network))
def local_out_add_port(self, network, port, mac):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network), eth_dst=mac)
actions = [ofpp.OFPActionOutput(port=port)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.LOCAL_OUT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def local_out_delete_port(self, network, mac):
self.delete_flows(table_id=tables.LOCAL_OUT,
metadata=meta.mk_metadata(network), eth_dst=mac)
def arp_passthrough(self, network, tpa):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network),
eth_type=ether.ETH_TYPE_ARP,
arp_op=arp.ARP_REQUEST,
arp_tpa=tpa)
instructions = [
ofpp.OFPInstructionGotoTable(table_id=tables.TUNNEL_OUT)]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.ARP_PASSTHROUGH,
priority=1,
idle_timeout=5,
match=match,
instructions=instructions)
self._send_msg(msg)
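# Illustrative usage sketch (an assumption, not part of this module): the ryu
# datapath wiring done by ofswitch.OpenFlowSwitch and the constructor
# arguments are omitted here.
#
#   br = OFAgentIntegrationBridge(...)
#   br.setup_default_table()
#   # GRE tenant network xxx=3 with tunnel key yyy=100
#   br.provision_tenant_tunnel(p_const.TYPE_GRE, network=3, segmentation_id=100)
#   # VM port i=7 with a known MAC on that network
#   br.check_in_port_add_local_port(network=3, port=7)
#   br.local_out_add_port(network=3, port=7, mac='fa:16:3e:00:00:01')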
|
|
'''
Created on Sep 19, 2012
@author: steve.a.jarvis@gmail.com
A flexible, neat neural network.
'''
from __future__ import print_function
from random import random
import math
class NeuralNetwork(object):
'''
The public interface is meant to be used like this:
    nn = NeuralNetwork(number_in, number_hid, number_out)
while not_satisfied:
nn.train(data, change_rate, momentum_rate, iterations)
answer = nn.evaluate(inputs)
nn.save_weights('/some/path')
nn.load_weights('/some/path')
'''
def __init__(self, nin, nhid, nout):
# Check for valid input
for param in [nin, nhid, nout]:
if param == 0:
                raise ValueError('Each layer must have at least one node.')
elif not isinstance(param, int):
raise ValueError('Dimensions of network must be ints.')
        # Add one to the input layer to act as a bias. The bias lets the
        # network shift its activation function, which helps it fit the
        # target function.
num_input = nin + 1
num_hidden = nhid
num_output = nout
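        # Weight matrices: input -> hidden one, hidden one -> hidden two and
        # hidden two -> output, plus matching momentum matrices (initialised
        # to zero) used to smooth the weight updates.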
self.weights_hid_one = self._make_matrix(num_input, num_hidden)
self.weights_hid_two = self._make_matrix(num_hidden, num_hidden)
self.weights_out = self._make_matrix(num_hidden, num_output)
self.momentum_hid_one = self._make_matrix(num_input, num_hidden, 0.0)
self.momentum_hid_two = self._make_matrix(num_hidden, num_hidden, 0.0)
self.momentum_out = self._make_matrix(num_hidden, num_output, 0.0)
def evaluate(self, input_vals):
'''
        Return a list with the activation of each output neuron for the
        given inputs.
'''
# Can be passed as a tuple.
input_vals = list(input_vals)
if len(input_vals) != len(self.weights_hid_one) - 1:
            raise ValueError('Number of inputs doesn\'t match the input layer size.')
# The bias neuron
input_vals.append(1)
self.activation_in = input_vals
        # Find the first hidden layer's activation levels. Each activation
        # level is the dot product of the incoming values and their weights,
        # passed through tanh to yield the actual activation value.
cross = self._dot(self.activation_in, self.weights_hid_one)
self.activation_hid_one = [self._tanh(val) for val in cross]
# Second hidden layer
cross = self._dot(self.activation_hid_one, self.weights_hid_two)
self.activation_hid_two = [self._tanh(val) for val in cross]
# Output activations just like the hidden layers'.
cross = self._dot(self.activation_hid_two, self.weights_out)
self.activation_out = [self._tanh(val) for val in cross]
return self.activation_out
def _back_propagate(self, target, change_mult, momentum_mult):
'''
        Work backwards from the output of the network, adjusting weights
        to inch nearer the connections (and therefore the answers) we
        want.
'''
        # Target could have been passed as an int, but needs to be a list
if type(target) is int:
target = [target]
# First calculate deltas of the output weights.
# delta = (expected - actual) * d(tanh(a))/da
delta_out = [0.0] * len(self.weights_out[0])
for j in range(len(self.weights_out[0])):
error = target[j] - self.activation_out[j]
delta_out[j] = error * self._derivative_tanh(self.activation_out[j])
# Need to start from the tail end and work up.
# Update the output weights.
self._update_weights(self.weights_out, self.activation_hid_two,
delta_out, self.momentum_out,
change_mult, momentum_mult)
# Find deltas for the second hidden layer.
delta_hid_two = self._calc_deltas(self.activation_hid_two,
delta_out, self.weights_out)
# Update the weights for hidden layer.
self._update_weights(self.weights_hid_two, self.activation_hid_one,
delta_hid_two, self.momentum_hid_two,
change_mult, momentum_mult)
# After the hid two weights change, find deltas for hid1.
delta_hid_one = self._calc_deltas(self.activation_hid_one,
delta_hid_two, self.weights_hid_two)
# And update the hid one weights
self._update_weights(self.weights_hid_one, self.activation_in,
delta_hid_one, self.momentum_hid_one,
change_mult, momentum_mult)
def _calc_deltas(self, activations, deltas_downstream, weights_downstream):
# Calculate the deltas of the hidden layer.
# delta = sum(downstream weights * deltas) * d(tanh(a))/da
deltas = [0.0] * len(activations)
for j in range(len(activations)):
error = 0.0
# This inner loop sums all errors downstream of the current neuron
for k in range(len(deltas_downstream)):
error += deltas_downstream[k] * weights_downstream[j][k]
deltas[j] = error * self._derivative_tanh(activations[j])
return deltas
def _update_weights(self, changing_weights, activations_upstream,
deltas_downstream, momentums, change_co, mom_co):
'''
Update weights in the given layer, based on the upstream values,
downstream mistakes, and change rates.
'''
# change = cofactor * delta * current_value + momentum
# weights += changes
for j in range(len(changing_weights)):
for k in range(len(deltas_downstream)):
change = change_co * deltas_downstream[k] * activations_upstream[j]
                # Python passes references to these list objects, and lists
                # are mutable, so the updates made here are visible to the
                # caller.
changing_weights[j][k] += change + (mom_co * momentums[j][k])
# Momentum speeds up learning by minimizing "zig zagginess".
momentums[j][k] = change
def train_network(self, data_train, change_rate=0.4,
momentum=0.1, iters=1000):
'''
Train the network with repeated evaluations and back propagations.
Data is passed as a list of input, target pairs.
'''
# First train the network.
for i in range(iters):
# Choose a random element from the training set.
selection = math.floor(random() * len(data_train))
data = data_train[int(selection)]
self.evaluate(data[0])
self._back_propagate(data[1], change_rate, momentum)
def load_weights(self, source):
'''
        In a real application it would be inefficient to retrain the
        network every time. Instead, save and load the weights.
'''
# Lazy import, might not be necessary.
import shelve
d = shelve.open(source, flag='r')
hid_one_temp = d['weights_hid_one']
hid_two_temp = d['weights_hid_two']
out_temp = d['weights_out']
if (len(self.weights_hid_one) != len(hid_one_temp)
or len(self.weights_out) != len(out_temp)):
raise ValueError('Wrong dimensions on set of weights.')
self.weights_hid_one = hid_one_temp
self.weights_hid_two = hid_two_temp
self.weights_out = out_temp
d.close()
def save_weights(self, dest):
'''
Save the current weights with shelve.
'''
# Lazy import, might not be necessary.
import shelve
d = shelve.open(dest)
d['weights_hid_one'] = self.weights_hid_one
d['weights_hid_two'] = self.weights_hid_two
d['weights_out'] = self.weights_out
d.close()
def _make_matrix(self, depth, breadth, fill=None):
matrix = []
for row in range(depth):
matrix.append([])
for col in range(breadth):
if fill is None:
num = 0.5 * (random() - 0.5)
else:
num = fill
matrix[len(matrix) - 1].append(num)
return matrix
def _tanh(self, x):
'''
        Return the hyperbolic tangent of x. Tanh produces a nice sigmoidal
function to use for the evaluations.
'''
return ((math.e ** (2 * x)) - 1) / ((math.e ** (2 * x)) + 1)
def _derivative_tanh(self, y):
'''
Given the activation value of a neuron (the output of tanh(x))
return the derivative of tanh for that y.
'''
# Proof this is equal to the derivative of tanh(x):
# let y = tanh(x)
# d/dx tanh(x) = sech^2(x) // From Wolfram
# sech^2(x) = 1/cosh^2(x)
# sech^2(x) = (cosh^2(x) - sinh^2(x)) / cosh^2(x)
# sech^2(x) = 1 - sinh^2(x) / cosh^2(x)
# sech^2(x) = 1 - tanh^2(x)
# sech^2(x) = 1 - y^2 // Substitute given. Boom.
return 1 - (y ** 2)
def _dot(self, m1, m2):
'''
Specific dot function. m1 must be the activation list, m2
must be a matrix with depth equal to len(m1)
'''
if len(m1) != len(m2):
raise ValueError('Can\'t dot those matrices, size matters.')
new_matrix = []
for j in range(len(m2[0])):
dot = 0.0
for k in range(len(m1)):
dot += m1[k] * m2[k][j]
new_matrix.append(dot)
return new_matrix
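if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): learn XOR.
    # The layer sizes and training hyperparameters below are illustrative
    # guesses, not tuned values.
    xor_data = [
        ([0, 0], [0]),
        ([0, 1], [1]),
        ([1, 0], [1]),
        ([1, 1], [0]),
    ]
    net = NeuralNetwork(2, 4, 1)
    net.train_network(xor_data, change_rate=0.4, momentum=0.1, iters=5000)
    for inputs, target in xor_data:
        print(inputs, '->', net.evaluate(inputs), 'expected', target)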
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Cinder."""
import os.path
import urllib2
from oslo.config import cfg
import six
from cinder import context
from cinder import exception
import cinder.openstack.common.policy
from cinder.openstack.common import policy as common_policy
from cinder import policy
from cinder import test
from cinder import utils
CONF = cfg.CONF
class PolicyFileTestCase(test.TestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
# since is_admin is defined by policy, create context before reset
self.context = context.RequestContext('fake', 'fake')
policy.reset()
self.target = {}
self.addCleanup(policy.reset)
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
self.flags(policy_file=tmpfilename)
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": []}""")
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": ["false:false"]}""")
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._POLICY_CACHE = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
class PolicyTestCase(test.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
policy.reset()
# NOTE(vish): preload rules to circumvent reloading from file
policy.init()
rules = {
"true": [],
"example:allowed": [],
"example:denied": [["false:false"]],
"example:get_http": [["http:http://www.example.com"]],
"example:my_file": [["role:compute_admin"],
["project_id:%(project_id)s"]],
"example:early_and_fail": [["false:false", "rule:true"]],
"example:early_or_success": [["rule:true"], ["false:false"]],
"example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
"example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
}
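        # NOTE: in this list-of-lists rule syntax the outer list is OR-ed and
        # each inner list is AND-ed, so an empty list ("example:allowed")
        # always passes while [["false:false"]] can never pass.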
# NOTE(vish): then overload underlying brain
common_policy.set_brain(common_policy.Brain(rules))
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
self.addCleanup(policy.reset)
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_good_action(self):
action = "example:allowed"
policy.enforce(self.context, action, self.target)
def test_enforce_http_true(self):
def fakeurlopen(url, post_data):
return six.StringIO("True")
self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
action = "example:get_http"
target = {}
result = policy.enforce(self.context, action, target)
self.assertIsNone(result)
def test_enforce_http_false(self):
def fakeurlopen(url, post_data):
return six.StringIO("False")
self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
action = "example:get_http"
target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(test.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.rules = {
"default": [],
"example:exist": [["false:false"]]
}
self._set_brain('default')
self.context = context.RequestContext('fake', 'fake')
self.addCleanup(policy.reset)
def _set_brain(self, default_rule):
brain = cinder.openstack.common.policy.Brain(self.rules,
default_rule)
cinder.openstack.common.policy.set_brain(brain)
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self):
self._set_brain("default_noexist")
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {})
class ContextIsAdminPolicyTestCase(test.TestCase):
def setUp(self):
super(ContextIsAdminPolicyTestCase, self).setUp()
policy.reset()
policy.init()
def test_default_admin_role_is_admin(self):
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertFalse(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertTrue(ctx.is_admin)
def test_custom_admin_role_is_admin(self):
        # define explicit rules for context_is_admin
rules = {
'context_is_admin': [["role:administrator"], ["role:johnny-admin"]]
}
brain = common_policy.Brain(rules, CONF.policy_default_rule)
common_policy.set_brain(brain)
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertTrue(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['administrator'])
self.assertTrue(ctx.is_admin)
# default rule no longer applies
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertFalse(ctx.is_admin)
def test_context_is_admin_undefined(self):
rules = {
"admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
"default": [["rule:admin_or_owner"]],
}
brain = common_policy.Brain(rules, CONF.policy_default_rule)
common_policy.set_brain(brain)
ctx = context.RequestContext('fake', 'fake')
self.assertFalse(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['admin'])
self.assertTrue(ctx.is_admin)
|
|
# This file comprises the tests that are internally validated (as
# opposed to tests which produce output files that are externally
# validated). Primarily they are unittests.
# There is a read/write asymmetry: It is fairly easy to
# internally validate the results of reading a PNG file because we
# can know what pixels it should produce, but when writing a PNG
# file many choices are possible. The only thing we can do is read
# it back in again, which merely checks consistency, not that the
# PNG file we produce is valid.
# Run the tests from the command line:
# python -c 'import test_png;test_png.runTest()'
# If you have nose installed you can use that:
# nosetests .
import itertools
try:
import struct
except ImportError:
import ustruct as struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-unittest.html
import unittest
try:
import zlib
except ImportError:
import uzlib as zlib
from array import array
try:
from io import BytesIO
except ImportError:
from uio import BytesIO
try:
import numpy
except ImportError:
numpy = False
import png
import pngsuite
def runTest():
unittest.main(__name__)
#def topngbytes(name, rows, x, y, **k):
# """
# Convenience function for creating a PNG file "in memory" as
# a string. Creates a :class:`Writer` instance using the keyword
# arguments, then passes `rows` to its :meth:`Writer.write` method.
# The resulting PNG file is returned as a string. `name` is used
# to identify the file for debugging.
# """
#
# import os
#
# if os.environ.get('PYPNG_TEST_FILENAME'):
# print(name, file=sys.stderr)
# f = BytesIO()
# w = png.Writer(x, y, **k)
# w.write(f, rows)
# if os.environ.get('PYPNG_TEST_TMP'):
# w = open(name, 'wb')
# w.write(f.getvalue())
# w.close()
# return f.getvalue()
def _redirect_io(inp, out, f):
"""Calls the function `f` with ``sys.stdin`` changed to `inp`
and ``sys.stdout`` changed to `out`. They are restored when `f`
returns. This function returns whatever `f` returns.
"""
import os
import sys
oldin, sys.stdin = sys.stdin, inp
oldout, sys.stdout = sys.stdout, out
try:
x = f()
finally:
sys.stdin = oldin
sys.stdout = oldout
if os.environ.get('PYPNG_TEST_TMP') and hasattr(out,'getvalue'):
name = mycallersname()
if name:
w = open(name+'.png', 'wb')
w.write(out.getvalue())
w.close()
return x
def mycallersname():
"""Returns the name of the caller of the caller of this function
(hence the name of the caller of the function in which
"mycallersname()" textually appears). Returns None if this cannot
be determined.
"""
# http://docs.python.org/library/inspect.html#the-interpreter-stack
import inspect
frame = inspect.currentframe()
if not frame:
return None
frame_,filename_,lineno_,funname,linelist_,listi_ = (
inspect.getouterframes(frame)[2])
return funname
def seqtobytes(s):
"""Convert a sequence of integers to a *bytes* instance. Good for
plastering over Python 2 / Python 3 cracks.
"""
fmt = "{0}B".format(len(s))
return struct.pack(fmt, *s)
class Test(unittest.TestCase):
    # This member is used by the superclass. If we don't define a new
    # class here, then when self.assertRaises() is used and the PyPNG code
    # raises an assertion, we get no proper traceback. I can't work out
    # why, but defining a new class here means we get a proper traceback.
class failureException(Exception):
pass
# def helperLN(self, n):
# mask = (1 << n) - 1
# # Use small chunk_limit so that multiple chunk writing is
# # tested. Making it a test for Issue 20 (googlecode).
# w = png.Writer(15, 17, greyscale=True, bitdepth=n, chunk_limit=99)
# f = BytesIO()
# w.write_array(f, array('B', map(mask.__and__, range(1, 256))))
# r = png.Reader(bytes=f.getvalue())
# x,y,pixels,meta = r.read()
# self.assertEqual(x, 15)
# self.assertEqual(y, 17)
# self.assertEqual(list(itertools.chain(*pixels)),
# [mask & x for x in range(1,256)])
# def testL8(self):
# return self.helperLN(8)
# def testL4(self):
# return self.helperLN(4)
# def testL2(self):
# "Also tests asRGB8."
# w = png.Writer(1, 4, greyscale=True, bitdepth=2)
# f = BytesIO()
# w.write_array(f, array('B', range(4)))
# r = png.Reader(bytes=f.getvalue())
# x,y,pixels,meta = r.asRGB8()
# self.assertEqual(x, 1)
# self.assertEqual(y, 4)
# for i,row in enumerate(pixels):
# self.assertEqual(len(row), 3)
# self.assertEqual(list(row), [0x55*i]*3)
# def testP2(self):
# "2-bit palette."
# a = (255,255,255)
# b = (200,120,120)
# c = (50,99,50)
# w = png.Writer(1, 4, bitdepth=2, palette=[a,b,c])
# f = BytesIO()
# w.write_array(f, array('B', (0,1,1,2)))
# r = png.Reader(bytes=f.getvalue())
# x,y,pixels,meta = r.asRGB8()
# self.assertEqual(x, 1)
# self.assertEqual(y, 4)
# self.assertEqual([list(row) for row in pixels],
# [list(row) for row in [a, b, b, c]])
# def testPtrns(self):
# "Test colour type 3 and tRNS chunk (and 4-bit palette)."
# a = (50,99,50,50)
# b = (200,120,120,80)
# c = (255,255,255)
# d = (200,120,120)
# e = (50,99,50)
# w = png.Writer(3, 3, bitdepth=4, palette=[a,b,c,d,e])
# f = BytesIO()
# w.write_array(f, array('B', (4, 3, 2, 3, 2, 0, 2, 0, 1)))
# r = png.Reader(bytes=f.getvalue())
# x,y,pixels,meta = r.asRGBA8()
# self.assertEqual(x, 3)
# self.assertEqual(y, 3)
# c = c+(255,)
# d = d+(255,)
# e = e+(255,)
# boxed = [(e,d,c),(d,c,a),(c,a,b)]
# flat = map(lambda row: itertools.chain(*row), boxed)
# self.assertEqual([list(row) for row in pixels],
# [list(row) for row in flat])
def testRGBtoRGBA(self):
"""asRGBA8() on colour type 2 source."""
# Test for Issue 26 (googlecode)
# Also test that png.Reader can take a "file-like" object.
r = png.Reader(file=BytesIO(pngsuite.basn2c08))
x,y,pixels,meta = r.asRGBA8()
# Test the pixels at row 9 columns 0 and 1.
row9 = list(pixels)[9]
self.assertEqual(list(row9[0:8]),
[0xff, 0xdf, 0xff, 0xff, 0xff, 0xde, 0xff, 0xff])
def testLtoRGBA(self):
"""asRGBA() on grey source."""
# Test for Issue 60 (googlecode)
r = png.Reader(bytes=pngsuite.basi0g08)
x,y,pixels,meta = r.asRGBA()
row9 = list(list(pixels)[9])
self.assertEqual(row9[0:8],
[222, 222, 222, 255, 221, 221, 221, 255])
# def testCtrns(self):
# "Test colour type 2 and tRNS chunk."
# # Test for Issue 25 (googlecode)
# r = png.Reader(bytes=pngsuite.tbrn2c08)
# x,y,pixels,meta = r.asRGBA8()
# # I just happen to know that the first pixel is transparent.
# # In particular it should be #7f7f7f00
# row0 = list(pixels)[0]
# self.assertEqual(tuple(row0[0:4]), (0x7f, 0x7f, 0x7f, 0x00))
def testAdam7read(self):
"""Adam7 interlace reading.
Specifically, test that for images in the PngSuite that
have both an interlaced and straightlaced pair that both
images from the pair produce the same array of pixels."""
for candidate in pngsuite.png:
if not candidate.startswith('basn'):
continue
candi = candidate.replace('n', 'i')
if candi not in pngsuite.png:
continue
straight = png.Reader(bytes=pngsuite.png[candidate])
adam7 = png.Reader(bytes=pngsuite.png[candi])
# Just compare the pixels. Ignore x,y (because they're
# likely to be correct?); metadata is ignored because the
# "interlace" member differs. Lame.
straight = straight.read()[2]
adam7 = adam7.read()[2]
self.assertEqual([list(row) for row in straight],
[list(row) for row in adam7])
# def testAdam7write(self):
# """Adam7 interlace writing.
# For each test image in the PngSuite, write an interlaced
# and a straightlaced version. Decode both, and compare results.
# """
# # Not such a great test, because the only way we can check what
# # we have written is to read it back again.
#
# for name,bytes in pngsuite.png.items():
# # Only certain colour types supported for this test.
# if name[3:5] not in ['n0', 'n2', 'n4', 'n6']:
# continue
# it = png.Reader(bytes=bytes)
# x,y,pixels,meta = it.read()
# pngi = topngbytes('adam7wn'+name+'.png', pixels,
# x=x, y=y, bitdepth=it.bitdepth,
# greyscale=it.greyscale, alpha=it.alpha,
# transparent=it.transparent,
# interlace=False)
# x,y,ps,meta = png.Reader(bytes=pngi).read()
# it = png.Reader(bytes=bytes)
# x,y,pixels,meta = it.read()
# pngs = topngbytes('adam7wi'+name+'.png', pixels,
# x=x, y=y, bitdepth=it.bitdepth,
# greyscale=it.greyscale, alpha=it.alpha,
# transparent=it.transparent,
# interlace=True)
# x,y,pi,meta = png.Reader(bytes=pngs).read()
# self.assertEqual([list(row) for row in ps],
# [list(row) for row in pi])
# def testPGMin(self):
# """Test that the command line tool can read PGM files."""
# def do():
# return png._main(['testPGMin'])
# s = BytesIO()
# s.write(b'P5 2 2 3\n')
# s.write(b'\x00\x01\x02\x03')
# s.flush()
# s.seek(0)
# o = BytesIO()
# _redirect_io(s, o, do)
# r = png.Reader(bytes=o.getvalue())
# x,y,pixels,meta = r.read()
# self.assertTrue(r.greyscale)
# self.assertEqual(r.bitdepth, 2)
# def testPAMin(self):
# """Test that the command line tool can read PAM file."""
# def do():
# return png._main(['testPAMin'])
# s = BytesIO()
# s.write(b'P7\nWIDTH 3\nHEIGHT 1\nDEPTH 4\nMAXVAL 255\n'
# b'TUPLTYPE RGB_ALPHA\nENDHDR\n')
# # The pixels in flat row flat pixel format
# flat = [255,0,0,255, 0,255,0,120, 0,0,255,30]
# asbytes = seqtobytes(flat)
# s.write(asbytes)
# s.flush()
# s.seek(0)
# o = BytesIO()
# _redirect_io(s, o, do)
# r = png.Reader(bytes=o.getvalue())
# x,y,pixels,meta = r.read()
# self.assertTrue(r.alpha)
# self.assertTrue(not r.greyscale)
# self.assertEqual(list(itertools.chain(*pixels)), flat)
# def testLA4(self):
# """Create an LA image with bitdepth 4."""
# bytes = topngbytes('la4.png', [[5, 12]], 1, 1,
# greyscale=True, alpha=True, bitdepth=4)
# sbit = png.Reader(bytes=bytes).chunk(b'sBIT')[1]
# self.assertEqual(sbit, b'\x04\x04')
def testPal(self):
"""Test that a palette PNG returns the palette in info."""
r = png.Reader(bytes=pngsuite.basn3p04)
x,y,pixels,info = r.read()
self.assertEqual(x, 32)
self.assertEqual(y, 32)
self.assertTrue('palette' in info)
# def testPalWrite(self):
# """Test metadata for paletted PNG can be passed from one PNG
# to another."""
# r = png.Reader(bytes=pngsuite.basn3p04)
# x,y,pixels,info = r.read()
# w = png.Writer(**info)
# o = BytesIO()
# w.write(o, pixels)
# o.flush()
# o.seek(0)
# r = png.Reader(file=o)
# _,_,_,again_info = r.read()
# # Same palette
# self.assertEqual(again_info['palette'], info['palette'])
# def testPalExpand(self):
# """Test that bitdepth can be used to fiddle with pallete image."""
# r = png.Reader(bytes=pngsuite.basn3p04)
# x,y,pixels,info = r.read()
# pixels = [list(row) for row in pixels]
# info['bitdepth'] = 8
# w = png.Writer(**info)
# o = BytesIO()
# w.write(o, pixels)
# o.flush()
# o.seek(0)
# r = png.Reader(file=o)
# _,_,again_pixels,again_info = r.read()
# # Same pixels
# again_pixels = [list(row) for row in again_pixels]
# self.assertEqual(again_pixels, pixels)
def testPaletteForcealpha(self):
"""Test forcing alpha channel for palette"""
r = png.Reader(bytes=pngsuite.basn3p04)
r.preamble()
r.palette(alpha='force')
# def testPNMsbit(self):
# """Test that PNM files can generates sBIT chunk."""
# def do():
# return png._main(['testPNMsbit'])
# s = BytesIO()
# s.write(b'P6 8 1 1\n')
# for pixel in range(8):
# s.write(struct.pack('<I', (0x4081*pixel)&0x10101)[:3])
# s.flush()
# s.seek(0)
# o = BytesIO()
# _redirect_io(s, o, do)
# r = png.Reader(bytes=o.getvalue())
# sbit = r.chunk(b'sBIT')[1]
# self.assertEqual(sbit, b'\x01\x01\x01')
# def testLtrns0(self):
# """Create greyscale image with tRNS chunk."""
# return self.helperLtrns(0)
# def testLtrns1(self):
# """Using 1-tuple for transparent arg."""
# return self.helperLtrns((0,))
# def helperLtrns(self, transparent):
# """Helper used by :meth:`testLtrns*`."""
# pixels = zip([0x00, 0x38, 0x4c, 0x54, 0x5c, 0x40, 0x38, 0x00])
# o = BytesIO()
# w = png.Writer(8, 8, greyscale=True, bitdepth=1, transparent=transparent)
# w.write_packed(o, pixels)
# r = png.Reader(bytes=o.getvalue())
# x,y,pixels,meta = r.asDirect()
# self.assertTrue(meta['alpha'])
# self.assertTrue(meta['greyscale'])
# self.assertEqual(meta['bitdepth'], 1)
# def testWinfo(self):
# """Test the dictionary returned by a `read` method can be used
# as args for :meth:`Writer`.
# """
# r = png.Reader(bytes=pngsuite.basn2c16)
# info = r.read()[3]
# w = png.Writer(**info)
# def testPackedIter(self):
# """Test iterator for row when using write_packed.
#
# Indicative for Issue 47 (googlecode).
# """
# w = png.Writer(16, 2, greyscale=True, alpha=False, bitdepth=1)
# o = BytesIO()
# w.write_packed(o, [itertools.chain([0x0a], [0xaa]),
# itertools.chain([0x0f], [0xff])])
# r = png.Reader(bytes=o.getvalue())
# x,y,pixels,info = r.asDirect()
# pixels = list(pixels)
# self.assertEqual(len(pixels), 2)
# self.assertEqual(len(pixels[0]), 16)
def testInterlacedArray(self):
"""Test that reading an interlaced PNG yields each row as an
array."""
r = png.Reader(bytes=pngsuite.basi0g08)
list(r.read()[2])[0]
def testTrnsArray(self):
"""Test that reading a type 2 PNG with tRNS chunk yields each
row as an array (using asDirect)."""
r = png.Reader(bytes=pngsuite.tbrn2c08)
list(r.asDirect()[2])[0]
# Invalid file format tests. These construct various badly
# formatted PNG files, then feed them into a Reader. When
# everything is working properly, we should get FormatError
# exceptions raised.
def testEmpty(self):
"""Test empty file."""
r = png.Reader(bytes=b'')
self.assertRaises(png.FormatError, r.asDirect)
def testSigOnly(self):
"""Test file containing just signature bytes."""
r = png.Reader(bytes=pngsuite.basi0g01[:8])
self.assertRaises(png.FormatError, r.asDirect)
def testChun(self):
"""
Chunk doesn't have length and type.
"""
r = png.Reader(bytes=pngsuite.basi0g01[:13])
try:
r.asDirect()
self.fail()
except Exception as e:
self.assertTrue(isinstance(e, png.FormatError))
self.assertTrue('chunk length' in str(e))
def testChunkShort(self):
"""
Chunk that is too short.
"""
r = png.Reader(bytes=pngsuite.basi0g01[:21])
try:
r.asDirect()
self.fail()
except Exception as e:
self.assertTrue(isinstance(e, png.FormatError))
self.assertTrue('EOF' in str(e))
def testNoChecksum(self):
"""
Chunk that's too small to contain a checksum.
"""
r = png.Reader(bytes=pngsuite.basi0g01[:29])
try:
r.asDirect()
self.fail()
except Exception as e:
self.assertTrue(isinstance(e, png.FormatError))
self.assertTrue('EOF' in str(e))
# def testExtraPixels(self):
# """Test file that contains too many pixels."""
#
# def eachchunk(chunk):
# if chunk[0] != b'IDAT':
# return chunk
# data = zlib.decompress(chunk[1])
# data += b'\x00garbage'
# data = zlib.compress(data)
# chunk = (chunk[0], data)
# return chunk
# self.assertRaises(png.FormatError, self.helperFormat, eachchunk)
#
# def testNotEnoughPixels(self):
# def eachchunk(chunk):
# if chunk[0] != b'IDAT':
# return chunk
# # Remove last byte.
# data = zlib.decompress(chunk[1])
# data = data[:-1]
# data = zlib.compress(data)
# return (chunk[0], data)
# self.assertRaises(png.FormatError, self.helperFormat, eachchunk)
#
# def helperFormat(self, f):
# r = png.Reader(bytes=pngsuite.basn0g01)
# o = BytesIO()
# def newchunks():
# for chunk in r.chunks():
# yield f(chunk)
# png.write_chunks(o, newchunks())
# r = png.Reader(bytes=o.getvalue())
# return list(r.asDirect()[2])
#
# def testBadFilter(self):
# def eachchunk(chunk):
# if chunk[0] != b'IDAT':
# return chunk
# data = zlib.decompress(chunk[1])
# # Corrupt the first filter byte
# data = b'\x99' + data[1:]
# data = zlib.compress(data)
# return (chunk[0], data)
# self.assertRaises(png.FormatError, self.helperFormat, eachchunk)
# def testFlat(self):
# """Test read_flat."""
# import hashlib
#
# r = png.Reader(bytes=pngsuite.basn0g02)
# x,y,pixel,meta = r.read_flat()
# d = hashlib.md5(seqtobytes(pixel)).hexdigest()
# self.assertEqual(d, '255cd971ab8cd9e7275ff906e5041aa0')
# def testfromarray(self):
# img = png.from_array([[0, 0x33, 0x66], [0xff, 0xcc, 0x99]], 'L')
# img.save(BytesIO())
# def testfromarray3D(self):
# img = png.from_array(
# [[[0, 0, 0], [255, 0, 0]],
# [[255, 0, 0], [0, 0, 0]]], 'RGB')
# img.save(BytesIO())
# def testfromarrayL16(self):
# img = png.from_array(group(range(2**16), 256), 'L;16')
# img.save(BytesIO())
# def testfromarrayRGB(self):
# img = png.from_array([[0,0,0, 0,0,1, 0,1,0, 0,1,1],
# [1,0,0, 1,0,1, 1,1,0, 1,1,1]], 'RGB;1')
# o = BytesIO()
# img.save(o)
# def testfromarrayIter(self):
# i = itertools.islice(itertools.count(10), 20)
# i = ([x, x, x] for x in i)
# img = png.from_array(i, 'RGB;5', dict(height=20))
# f = BytesIO()
# img.save(f)
# def testfromarrayWrong(self):
# try:
# png.from_array([[1]], 'gray')
# except png.Error:
# return
# assert 0, "Expected from_array() to raise png.Error exception"
# def testfromarrayShortMode(self):
# png.from_array([[0,1],[2,3]], 'L2').save(BytesIO())
# def testFromarrayLA(self):
# png.from_array([[3,1],[0,3]], 'LA2',
# info=dict(greyscale=True)).save(BytesIO())
# numpy dependent tests.
# def testNumpyuint16(self):
# """numpy uint16."""
#
# numpy or self.skipTest("numpy is not available")
#
# rows = [map(numpy.uint16, range(0,0x10000,0x5555))]
# b = topngbytes('numpyuint16.png', rows, 4, 1,
# greyscale=True, alpha=False, bitdepth=16)
#
# def testNumpyuint8(self):
# """numpy uint8."""
#
# numpy or self.skipTest("numpy is not available")
#
# rows = [map(numpy.uint8, range(0,0x100,0x55))]
# b = topngbytes('numpyuint8.png', rows, 4, 1,
# greyscale=True, alpha=False, bitdepth=8)
#
# def testNumpybool(self):
# """numpy bool."""
#
# numpy or self.skipTest("numpy is not available")
#
# rows = [map(numpy.bool, [0,1])]
# b = topngbytes('numpybool.png', rows, 2, 1,
# greyscale=True, alpha=False, bitdepth=1)
#
# def testNumpyarray(self):
# """numpy array."""
#
# numpy or self.skipTest("numpy is not available")
#
# pixels = numpy.array([[0,0x5555],[0x5555,0xaaaa]], numpy.uint16)
# img = png.from_array(pixels, 'L')
# img.save(BytesIO())
#
# def testNumpyPalette(self):
# """numpy palette."""
#
# numpy or self.skipTest("numpy is not available")
#
# s = ['110010010011',
# '101011010100',
# '110010110101',
# '100010010011']
#
# s = [[int(p) for p in row] for row in s]
#
# palette = [(0x55,0x55,0x55), (0xff,0x99,0x99)]
# pnp = numpy.array(palette) # creates a 2x3 array
# w = png.Writer(len(s[0]), len(s), palette=pnp, bitdepth=1)
def paeth(self, x, a, b, c):
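        # Reference Paeth filter for one byte, following the PNG spec's
        # PaethPredictor: predict x from its left (a), up (b) and upper-left
        # (c) neighbours and return the filtered value x - predictor.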
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
return x - pr
# test filters and unfilters
# def testFilterScanlineFirstLine(self):
# fo = 3 # bytes per pixel
# line = [30, 31, 32, 230, 231, 232]
# out = png.filter_scanline(0, line, fo, None) # none
# self.assertEqual(list(out), [0, 30, 31, 32, 230, 231, 232])
# out = png.filter_scanline(1, line, fo, None) # sub
# self.assertEqual(list(out), [1, 30, 31, 32, 200, 200, 200])
# out = png.filter_scanline(2, line, fo, None) # up
# self.assertEqual(list(out), [2, 30, 31, 32, 230, 231, 232])
# out = png.filter_scanline(3, line, fo, None) # average
# self.assertEqual(list(out), [3, 30, 31, 32, 215, 216, 216])
# out = png.filter_scanline(4, line, fo, None) # paeth
# self.assertEqual(list(out), [
# 4, self.paeth(30, 0, 0, 0), self.paeth(31, 0, 0, 0),
# self.paeth(32, 0, 0, 0), self.paeth(230, 30, 0, 0),
# self.paeth(231, 31, 0, 0), self.paeth(232, 32, 0, 0)
# ])
# def testFilterScanline(self):
# prev = [20, 21, 22, 210, 211, 212]
# line = [30, 32, 34, 230, 233, 236]
# fo = 3
# out = png.filter_scanline(0, line, fo, prev) # none
# self.assertEqual(list(out), [0, 30, 32, 34, 230, 233, 236])
# out = png.filter_scanline(1, line, fo, prev) # sub
# self.assertEqual(list(out), [1, 30, 32, 34, 200, 201, 202])
# out = png.filter_scanline(2, line, fo, prev) # up
# self.assertEqual(list(out), [2, 10, 11, 12, 20, 22, 24])
# out = png.filter_scanline(3, line, fo, prev) # average
# self.assertEqual(list(out), [3, 20, 22, 23, 110, 112, 113])
# out = png.filter_scanline(4, line, fo, prev) # paeth
# self.assertEqual(list(out), [
# 4, self.paeth(30, 0, 20, 0), self.paeth(32, 0, 21, 0),
# self.paeth(34, 0, 22, 0), self.paeth(230, 30, 210, 20),
# self.paeth(233, 32, 211, 21), self.paeth(236, 34, 212, 22)
# ])
def testUnfilterScanline(self):
reader = png.Reader(bytes=b'')
reader.psize = 3
scanprev = array('B', [20, 21, 22, 210, 211, 212])
scanline = array('B', [30, 32, 34, 230, 233, 236])
def cp(a):
return array('B', a)
out = reader.undo_filter(0, cp(scanline), cp(scanprev))
self.assertEqual(list(out), list(scanline)) # none
out = reader.undo_filter(1, cp(scanline), cp(scanprev))
self.assertEqual(list(out), [30, 32, 34, 4, 9, 14]) # sub
out = reader.undo_filter(2, cp(scanline), cp(scanprev))
self.assertEqual(list(out), [50, 53, 56, 184, 188, 192]) # up
out = reader.undo_filter(3, cp(scanline), cp(scanprev))
self.assertEqual(list(out), [40, 42, 45, 99, 103, 108]) # average
out = reader.undo_filter(4, cp(scanline), cp(scanprev))
self.assertEqual(list(out), [50, 53, 56, 184, 188, 192]) # paeth
def testUnfilterScanlinePaeth(self):
# This tests more edge cases in the paeth unfilter
reader = png.Reader(bytes=b'')
reader.psize = 3
scanprev = array('B', [2, 0, 0, 0, 9, 11])
scanline = array('B', [6, 10, 9, 100, 101, 102])
out = reader.undo_filter(4, scanline, scanprev)
self.assertEqual(list(out), [8, 10, 9, 108, 111, 113]) # paeth
def testModifyRows(self):
# Tests that the rows yielded by the pixels generator
# can be safely modified.
k = 'f02n0g08'
r1 = png.Reader(bytes=pngsuite.png[k])
r2 = png.Reader(bytes=pngsuite.png[k])
_,_,pixels1,info1 = r1.asDirect()
_,_,pixels2,info2 = r2.asDirect()
izip = getattr(itertools, 'izip', zip)
for row1, row2 in izip(pixels1, pixels2):
self.assertEqual(row1, row2)
for i in range(len(row1)):
row1[i] = 11117 % (i + 1)
# def testPNMWrite(self):
# o = BytesIO()
# w,h = 3,3
# pixels = [[0, 1, 2],
# [3, 0, 1],
# [2, 3, 0]]
# meta = dict(alpha=False, greyscale=True, bitdepth=2, planes=1)
# png.write_pnm(o, w, h, pixels, meta)
def group(s, n):
# See http://www.python.org/doc/2.6/library/functions.html#zip
return list(zip(*[iter(s)]*n))
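    # For example, group([1, 2, 3, 4, 5, 6], 2) returns [(1, 2), (3, 4), (5, 6)];
    # any trailing elements that do not fill a complete group are dropped.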
if __name__ == '__main__':
unittest.main(__name__)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:si:et:enc=utf-8
# Author: Ivan A-R <ivan@tuxotronic.org>
# Project page: http://tuxotronic.org/wiki/projects/stm32loader
#
# This file is part of stm32loader.
#
# stm32loader is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# stm32loader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with stm32loader; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
import sys, getopt
import serial
import time
try:
from progressbar import *
usepbar = 1
except ImportError:
usepbar = 0
# Verbose level
QUIET = 20
# these come from AN2606
chip_ids = {
0x412: "STM32 Low-density",
0x410: "STM32 Medium-density",
0x414: "STM32 High-density",
0x420: "STM32 Medium-density value line",
0x428: "STM32 High-density value line",
0x430: "STM32 XL-density",
0x416: "STM32 Medium-density ultralow power line",
0x411: "STM32F2xx",
0x413: "STM32F4xx",
}
def mdebug(level, message):
if(QUIET >= level):
print >> sys.stderr , message
class CmdException(Exception):
pass
class CommandInterface:
extended_erase = 0
def open(self, aport='/dev/tty.usbserial-ftCYPMYJ', abaudrate=115200) :
self.sp = serial.Serial(
port=aport,
baudrate=abaudrate, # baudrate
bytesize=8, # number of databits
parity=serial.PARITY_EVEN,
stopbits=1,
xonxoff=0, # don't enable software flow control
rtscts=0, # don't enable RTS/CTS flow control
timeout=5 # set a timeout value, None for waiting forever
)
def _wait_for_ask(self, info = ""):
# wait for ask
try:
ask = ord(self.sp.read())
except:
raise CmdException("Can't read port or timeout")
else:
if ask == 0x79:
# ACK
return 1
else:
if ask == 0x1F:
# NACK
raise CmdException("NACK "+info)
else:
                    # Unknown response
raise CmdException("Unknown response. "+info+": "+hex(ask))
def reset(self):
self.sp.setDTR(0)
time.sleep(0.1)
self.sp.setDTR(1)
time.sleep(0.5)
def initChip(self):
# Set boot
self.sp.setRTS(0)
self.reset()
self.sp.write("\x7F") # Syncro
return self._wait_for_ask("Syncro")
def releaseChip(self):
self.sp.setRTS(1)
self.reset()
def cmdGeneric(self, cmd):
self.sp.write(chr(cmd))
self.sp.write(chr(cmd ^ 0xFF)) # Control byte
return self._wait_for_ask(hex(cmd))
def cmdGet(self):
if self.cmdGeneric(0x00):
            mdebug(10, "*** Get command")
len = ord(self.sp.read())
version = ord(self.sp.read())
mdebug(10, " Bootloader version: "+hex(version))
dat = map(lambda c: hex(ord(c)), self.sp.read(len))
if '0x44' in dat:
self.extended_erase = 1
mdebug(10, " Available commands: "+", ".join(dat))
self._wait_for_ask("0x00 end")
return version
else:
raise CmdException("Get (0x00) failed")
def cmdGetVersion(self):
if self.cmdGeneric(0x01):
mdebug(10, "*** GetVersion command")
version = ord(self.sp.read())
self.sp.read(2)
self._wait_for_ask("0x01 end")
mdebug(10, " Bootloader version: "+hex(version))
return version
else:
raise CmdException("GetVersion (0x01) failed")
def cmdGetID(self):
if self.cmdGeneric(0x02):
mdebug(10, "*** GetID command")
len = ord(self.sp.read())
id = self.sp.read(len+1)
self._wait_for_ask("0x02 end")
return reduce(lambda x, y: x*0x100+y, map(ord, id))
else:
raise CmdException("GetID (0x02) failed")
def _encode_addr(self, addr):
byte3 = (addr >> 0) & 0xFF
byte2 = (addr >> 8) & 0xFF
byte1 = (addr >> 16) & 0xFF
byte0 = (addr >> 24) & 0xFF
crc = byte0 ^ byte1 ^ byte2 ^ byte3
return (chr(byte0) + chr(byte1) + chr(byte2) + chr(byte3) + chr(crc))
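    # For example, _encode_addr(0x08000000) returns the five bytes
    # '\x08\x00\x00\x00\x08': the address sent MSB first, followed by the XOR
    # checksum of the four address bytes (illustrative worked example).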
def cmdReadMemory(self, addr, lng):
assert(lng <= 256)
if self.cmdGeneric(0x11):
mdebug(10, "*** ReadMemory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x11 address failed")
N = (lng - 1) & 0xFF
crc = N ^ 0xFF
self.sp.write(chr(N) + chr(crc))
self._wait_for_ask("0x11 length failed")
return map(lambda c: ord(c), self.sp.read(lng))
else:
raise CmdException("ReadMemory (0x11) failed")
def cmdGo(self, addr):
if self.cmdGeneric(0x21):
mdebug(10, "*** Go command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x21 go failed")
else:
raise CmdException("Go (0x21) failed")
def cmdWriteMemory(self, addr, data):
assert(len(data) <= 256)
if self.cmdGeneric(0x31):
mdebug(10, "*** Write memory command")
self.sp.write(self._encode_addr(addr))
self._wait_for_ask("0x31 address failed")
#map(lambda c: hex(ord(c)), data)
lng = (len(data)-1) & 0xFF
            mdebug(10, " %s bytes to write" % (lng + 1))
self.sp.write(chr(lng)) # len really
crc = 0xFF
for c in data:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x31 programming failed")
mdebug(10, " Write memory done")
else:
raise CmdException("Write memory (0x31) failed")
def cmdEraseMemory(self, sectors = None):
if self.extended_erase:
            return self.cmdExtendedEraseMemory()
if self.cmdGeneric(0x43):
mdebug(10, "*** Erase memory command")
if sectors is None:
# Global erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0x00))
else:
# Sectors erase
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x43 erasing failed")
mdebug(10, " Erase memory done")
else:
raise CmdException("Erase memory (0x43) failed")
def cmdExtendedEraseMemory(self):
if self.cmdGeneric(0x44):
mdebug(10, "*** Extended Erase memory command")
# Global mass erase
self.sp.write(chr(0xFF))
self.sp.write(chr(0xFF))
# Checksum
self.sp.write(chr(0x00))
tmp = self.sp.timeout
self.sp.timeout = 30
print "Extended erase (0x44), this can take ten seconds or more"
self._wait_for_ask("0x44 erasing failed")
self.sp.timeout = tmp
mdebug(10, " Extended Erase memory done")
else:
raise CmdException("Extended Erase memory (0x44) failed")
def cmdWriteProtect(self, sectors):
if self.cmdGeneric(0x63):
mdebug(10, "*** Write protect command")
self.sp.write(chr((len(sectors)-1) & 0xFF))
crc = 0xFF
for c in sectors:
crc = crc ^ c
self.sp.write(chr(c))
self.sp.write(chr(crc))
self._wait_for_ask("0x63 write protect failed")
mdebug(10, " Write protect done")
else:
raise CmdException("Write Protect memory (0x63) failed")
def cmdWriteUnprotect(self):
if self.cmdGeneric(0x73):
mdebug(10, "*** Write Unprotect command")
self._wait_for_ask("0x73 write unprotect failed")
self._wait_for_ask("0x73 write unprotect 2 failed")
mdebug(10, " Write Unprotect done")
else:
raise CmdException("Write Unprotect (0x73) failed")
def cmdReadoutProtect(self):
if self.cmdGeneric(0x82):
mdebug(10, "*** Readout protect command")
self._wait_for_ask("0x82 readout protect failed")
self._wait_for_ask("0x82 readout protect 2 failed")
mdebug(10, " Read protect done")
else:
raise CmdException("Readout protect (0x82) failed")
def cmdReadoutUnprotect(self):
if self.cmdGeneric(0x92):
mdebug(10, "*** Readout Unprotect command")
self._wait_for_ask("0x92 readout unprotect failed")
self._wait_for_ask("0x92 readout unprotect 2 failed")
mdebug(10, " Read Unprotect done")
else:
raise CmdException("Readout unprotect (0x92) failed")
# Complex commands section
def readMemory(self, addr, lng):
data = []
if usepbar:
widgets = ['Reading: ', Percentage(),', ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets,maxval=lng, term_width=79).start()
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, 256)
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Read %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
data = data + self.cmdReadMemory(addr, lng)
return data
def writeMemory(self, addr, data):
lng = len(data)
if usepbar:
widgets = ['Writing: ', Percentage(),' ', ETA(), ' ', Bar()]
pbar = ProgressBar(widgets=widgets, maxval=lng, term_width=79).start()
offs = 0
while lng > 256:
if usepbar:
pbar.update(pbar.maxval-lng)
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+256])
offs = offs + 256
addr = addr + 256
lng = lng - 256
if usepbar:
pbar.update(pbar.maxval-lng)
pbar.finish()
else:
mdebug(5, "Write %(len)d bytes at 0x%(addr)X" % {'addr': addr, 'len': 256})
self.cmdWriteMemory(addr, data[offs:offs+lng] + ([0xFF] * (256-lng)) )
def __init__(self) :
pass
def usage():
print """Usage: %s [-hqVewvr] [-l length] [-p port] [-b baud] [-a addr] [-g addr] [file.bin]
-h This help
-q Quiet
-V Verbose
-e Erase
-w Write
-v Verify
-r Read
-l length Length of read
-p port Serial port (default: /dev/tty.usbserial-ftCYPMYJ)
-b baud Baud speed (default: 115200)
-a addr Target address
-g addr Address to start running at (0x08000000, usually)
./stm32loader.py -e -w -v example/main.bin
""" % sys.argv[0]
if __name__ == "__main__":
# Import Psyco if available
try:
import psyco
psyco.full()
print "Using Psyco..."
except ImportError:
pass
conf = {
'port': 'COM20',
'baud': 115200,
'address': 0x08000000,
'erase': 0,
'write': 0,
'verify': 0,
'read': 0,
'go_addr':-1,
}
# http://www.python.org/doc/2.5.2/lib/module-getopt.html
try:
opts, args = getopt.getopt(sys.argv[1:], "hqVewvrp:b:a:l:g:")
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
QUIET = 5
for o, a in opts:
if o == '-V':
QUIET = 10
elif o == '-q':
QUIET = 0
elif o == '-h':
usage()
sys.exit(0)
elif o == '-e':
conf['erase'] = 1
elif o == '-w':
conf['write'] = 1
elif o == '-v':
conf['verify'] = 1
elif o == '-r':
conf['read'] = 1
elif o == '-p':
conf['port'] = a
elif o == '-b':
conf['baud'] = eval(a)
elif o == '-a':
conf['address'] = eval(a)
elif o == '-g':
conf['go_addr'] = eval(a)
elif o == '-l':
conf['len'] = eval(a)
else:
assert False, "unhandled option"
cmd = CommandInterface()
cmd.open(conf['port'], conf['baud'])
mdebug(10, "Open port %(port)s, baud %(baud)d" % {'port':conf['port'], 'baud':conf['baud']})
try:
try:
cmd.initChip()
except:
print "Can't init. Ensure that BOOT0 is enabled and reset device"
bootversion = cmd.cmdGet()
mdebug(0, "Bootloader version %X" % bootversion)
id = cmd.cmdGetID()
mdebug(0, "Chip id: 0x%x (%s)" % (id, chip_ids.get(id, "Unknown")))
# cmd.cmdGetVersion()
# cmd.cmdGetID()
# cmd.cmdReadoutUnprotect()
# cmd.cmdWriteUnprotect()
# cmd.cmdWriteProtect([0, 1])
if (conf['write'] or conf['verify']):
data = map(lambda c: ord(c), file(args[0], 'rb').read())
if conf['erase']:
cmd.cmdEraseMemory()
if conf['write']:
cmd.writeMemory(conf['address'], data)
if conf['verify']:
verify = cmd.readMemory(conf['address'], len(data))
if(data == verify):
print "Verification OK"
else:
print "Verification FAILED"
print str(len(data)) + ' vs ' + str(len(verify))
for i in xrange(0, len(data)):
if data[i] != verify[i]:
print hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i])
if not conf['write'] and conf['read']:
rdata = cmd.readMemory(conf['address'], conf['len'])
file(args[0], 'wb').write(''.join(map(chr,rdata)))
if conf['go_addr'] != -1:
cmd.cmdGo(conf['go_addr'])
finally:
cmd.releaseChip()
|
|
#!/usr/bin/python3
from jsonschema import validate
schema = {
"$schema": "http://json-schema.org/schema#",
"description": "Schema for border router information",
"title": "Border Router Information",
"type": "object",
"properties": {
"information": {
"type": "object",
"oneOf": [
{ "$ref": "#/definitions/interface_configuration" },
{ "$ref": "#/definitions/rpl_configuration" },
{ "$ref": "#/definitions/neighbors" },
{ "$ref": "#/definitions/routes" },
{ "$ref": "#/definitions/topology" }
]
}
},
"definitions": {
"interface_configuration": {
"title": "Network Interface Configuration options",
"type": "array",
"items": {
"type": "object",
"items" : {
"type": "array",
"values": {
"oneOf": [
{ "$ref": "#/unicast/ipv6" },
{ "$ref": "#/unicast/ipv4" },
{ "$ref": "#/multicast/mcast_ipv6" },
{ "$ref": "#/multicast/mcast_ipv4" }
]
}
}
}
},
"rpl_configuration": {
"title": "RPL Configuration options",
"type": "array",
"items": {
"type": "object",
"items" : {
"RPL mode" : "string",
"Objective function" : "string",
"Routing metric" : "string",
"Mode of operation" : "string",
"Send probes to nodes" : "string",
"Max instances" : "string",
"Max DAG / instance" : "string",
"Min hop rank increment" : "string",
"Initial link metric" : "string",
"RPL preference value" : "string",
"DAG grounded by default" : "string",
"Default instance id" : "string",
"Insert hop-by-hop option" : "string",
"Specify DAG when sending DAO" : "string",
"DIO min interval" : "string",
"DIO doublings interval" : "string",
"DIO redundancy value" : "string",
"DAO send timer" : "string",
"DAO max retransmissions" : "string",
"DAO ack expected" : "string",
"DIS send periodically" : "string",
"DIS interval" : "string",
"Default route lifetime unit" : "string",
"Default route lifetime" : "string",
"Multicast MOP3 route lifetime" : "string"
}
}
},
"neighbors": {
"title": "Neighbor information",
"type": "array",
"items": {
"type": "object",
"items" : {
"Operation" : "string",
"IPv6 address": "string",
"Link address": "string",
"Is router": "string",
"Link metric": "string",
}
}
},
"routes": {
"title": "Network routes",
"type": "array",
"items": {
"type": "object",
"items" : {
"Operation" : "string",
"IPv6 prefix": "string",
"IPv6 address": "string",
"Link address": "string"
}
}
},
"topology": {
"type": "object",
"properties": {
"nodes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "integer"
},
"label": {
"type": "string"
},
"title": {
"type": "string"
}
}
}
},
"edges": {
"type": "array",
"items": {
"type": "object",
"properties": {
"from": {
"type": "integer"
},
"to": {
"type": "integer"
}
}
}
}
}
}
},
"unicast": {
"ipv6": {
"title": "IPv6 addresses",
"type": "array",
"items": {
"state": "string",
"type": "string",
"lifetime": "string"
}
},
"ipv4": {
"title": "IPv4 addresses",
"type": "array",
"items": {
"state": "string",
"type": "string",
"lifetime": "string"
}
}
},
"multicast": {
"mcast_ipv6": {
"title": "IPv6 addresses",
"type": "array",
"items": {
"address": "string"
}
},
"mcast_ipv4": {
"title": "IPv4 addresses",
"type": "array",
"items": {
"value": "string"
}
}
}
}
validate(
{
"interface_configuration": [
{
"0x20009000": [
{
"Type" : "Ethernet"
},
{
"Link address" : "00:04:9F:2A:00:01"
},
{
"MTU" : "1500"
},
{
"IPv6" : [
{
"fe80::204:9fff:fe2a:1" : {
"state": "preferred",
"type": "autoconf",
"lifetime": "infinite"
}
},
{
"2001:db8::1" : {
"state": "preferred",
"type": "manual",
"lifetime": "infinite"
}
},
]
},
{
"IPv4" : [
{
"192.0.2.1" : {
"state": "preferred",
"type": "autoconf",
"lifetime": "infinite"
}
}
]
},
{
"IPv6 multicast" :
[
"ff02::1",
"ff02::1:ff2a:1",
"ff02::1:ff00:1"
]
},
{
"IPv4 multicast" :
[
"224.0.0.251"
]
},
{
"IPv4 gateway" : "0.0.0.0"
},
{
"IPv4 netmask" : "255.255.255.0"
},
]
},
{
"0x20009a30": [
{ "Type" : "IEEE 802.15.4" }
]
}
],
"neighbors": [
{
"0x20009000": [
{
"IPv6 address": "2001:db8::1",
"Link address": "00:11:22:33:44:55:66:77",
"Is router": "true",
"Link metric": "0"
},
{
"IPv6 address": "2001:db8::2",
"Link address": "77:11:22:33:44:55:66:00",
"Is router": "false",
"Link metric": "1"
},
{
"Operation" : "delete",
"IPv6 address": "2001:db8::3"
}
]
},
{
"0x20009a30": []
}
],
"rpl_configuration": [
{ "RPL mode" : "mesh" },
{ "Objective function" : "MRHOF" },
{ "Routing metric" : "none" },
{ "Mode of operation" : "MOP2" },
{ "Send probes to nodes" : "false" },
{ "Max instances" : "1" },
{ "Max DAG / instance" : "2" },
{ "Min hop rank increment" : "256" },
{ "Initial link metric" : "2" },
{ "RPL preference value" : "0" },
{ "DAG grounded by default" : "false" },
{ "Default instance id" : "42" },
{ "Insert hop-by-hop option" : "true" },
{ "Specify DAG when sending DAO" : "true" },
{ "DIO min interval" : "12" },
{ "DIO doublings interval" : "8" },
{ "DIO redundancy value" : "10" },
{ "DAO send timer" : "4" },
{ "DAO max retransmissions" : "4" },
{ "DAO ack expected" : "true" },
{ "DIS send periodically" : "true" },
{ "DIS interval" : "60" },
{ "Default route lifetime unit" : "65535" },
{ "Default route lifetime" : "255" },
{ "Multicast MOP3 route lifetime" : "0" }
],
"routes": [
{
"0x20009000": [
{
"IPv6 prefix" : "fde3:2cda:3eea:4d14:212:4b00:0:2/128",
"IPv6 address" : "fe80::212:4b00:0:2",
"Link address" : "00:12:4B:00:00:00:00:02"
},
{
"Operation" : "delete",
"IPv6 prefix" : "fde3:2cda:3eea:4d14:212:4b00:0:3/128"
}
]
},
{
"0x20009a30": [
]
}
],
"topology" : {
"nodes": [
{
"id": 1,
"label": "N1",
"title": "Node 1"
},
{
"id": 2,
"label": "N2",
"title": "Node 2"
}
],
"edges": [
{
"from": 1,
"to": 2
}
]
}
}, schema)
|
|
import sys
MEM_SIZE = 0xffff
BYTE_SIZE = 0xff
class SAP2Emulator:
def __init__(self):
self._pc = 0x0000
self._a = 0x00
self._b = 0x00
self._c = 0x00
self._s = False
self._z = False
self._mem = bytearray(MEM_SIZE + 1)
self._halt = False
self._seg = False
self._instructions = {
0x80: self._add_b,
0x81: self._add_c,
0xa0: self._ana_b,
0xa1: self._ana_c,
0xe6: self._ani,
0xcd: self._call,
0x2f: self._cma,
0x3d: self._dcr_a,
0x05: self._dcr_b,
0x0d: self._dcr_c,
0x76: self._hlt,
0xdb: self._in,
0x3c: self._inr_a,
0x04: self._inr_b,
0x0c: self._inr_c,
0xfa: self._jm,
0xc3: self._jmp,
0xc2: self._jnz,
0xca: self._jz,
0x3a: self._lda,
0x78: self._mov_ab,
0x79: self._mov_ac,
0x47: self._mov_ba,
0x41: self._mov_bc,
0x4f: self._mov_ca,
0x48: self._mov_cb,
0x3e: self._mvi_a,
0x06: self._mvi_b,
0x0e: self._mvi_c,
0x00: self._nop,
0xb0: self._ora_b,
0xb1: self._ora_c,
0xf6: self._ori,
0xd3: self._out,
0x17: self._ral,
0x1f: self._rar,
0xc9: self._ret,
0x32: self._sta,
0x90: self._sub_b,
0x91: self._sub_c,
0xa8: self._xra_b,
0xa9: self._xra_c,
0xee: self._xri
}
    def load(self, start: int, binary: bytes):
        # copy the binary into memory starting at `start`, not at offset 0
        self._mem[start:start + len(binary)] = binary
def start(self):
while not self._halt and not self._seg:
self._instructions[self._mem[self._pc]]()
if self._seg:
raise SegfaultError(self)
def _increment_pc(self):
self._pc += 1
if self._pc > MEM_SIZE:
self._seg = True
raise SegfaultError(self)
    def _set_flags(self):
        # Mask to 8 bits first so arithmetic results wrap around like the
        # real accumulator instead of growing past 0xff.
        self._a &= BYTE_SIZE
        if self._a == 0:
            self._s = False
            self._z = True
        else:
            self._s = bool(self._a & 0x80)
            self._z = False
# Instructions
def _add_b(self):
self._increment_pc()
self._a = self._a + self._b
self._set_flags()
def _add_c(self):
self._increment_pc()
self._a = self._a + self._c
self._set_flags()
def _ana_b(self):
self._increment_pc()
self._a = self._a & self._b
self._set_flags()
def _ana_c(self):
self._increment_pc()
self._a = self._a & self._c
self._set_flags()
def _ani(self):
self._increment_pc()
self._a = self._a & self._mem[self._pc]
self._increment_pc()
self._set_flags()
def _call(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
self._increment_pc()
self._mem[0xfffe] = self._pc & BYTE_SIZE
self._mem[0xffff] = (self._pc >> 8) & BYTE_SIZE
self._pc = msb + lsb
    def _cma(self):
        self._increment_pc()
        # CMA is the one's complement of the accumulator, not arithmetic negation
        self._a = ~self._a & BYTE_SIZE
        self._set_flags()
def _dcr_a(self):
self._increment_pc()
self._a -= 1
self._set_flags()
def _dcr_b(self):
self._a = self._b
self._dcr_a()
def _dcr_c(self):
self._a = self._c
self._dcr_a()
def _hlt(self):
self._halt = True
    def _in(self):
        self._increment_pc()
        # read one byte only (ord() fails on multi-character input)
        self._a = ord(sys.stdin.read(1)) & BYTE_SIZE
        self._increment_pc()  # skip the port operand byte, mirroring _out
def _inr_a(self):
self._increment_pc()
self._a += 1
self._set_flags()
def _inr_b(self):
self._a = self._b
self._inr_a()
def _inr_c(self):
self._a = self._c
self._inr_a()
def _jm(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
if self._s:
self._pc = msb + lsb
else:
self._increment_pc()
def _jmp(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
self._pc = msb + lsb
def _jnz(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
if not self._z:
self._pc = msb + lsb
else:
self._increment_pc()
def _jz(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
if self._z:
self._pc = msb + lsb
else:
self._increment_pc()
def _lda(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
self._increment_pc()
self._a = self._mem[msb + lsb]
def _mov_ab(self):
self._increment_pc()
self._a = self._b
self._set_flags()
def _mov_ac(self):
self._increment_pc()
self._a = self._c
self._set_flags()
def _mov_ba(self):
self._increment_pc()
self._b = self._a
def _mov_bc(self):
self._increment_pc()
self._b = self._c
def _mov_ca(self):
self._increment_pc()
self._c = self._a
def _mov_cb(self):
self._increment_pc()
self._c = self._b
def _mvi_a(self):
self._increment_pc()
self._a = self._mem[self._pc]
self._increment_pc()
self._set_flags()
def _mvi_b(self):
self._increment_pc()
self._b = self._mem[self._pc]
self._increment_pc()
def _mvi_c(self):
self._increment_pc()
self._c = self._mem[self._pc]
self._increment_pc()
def _nop(self):
self._increment_pc()
def _ora_b(self):
self._increment_pc()
self._a = self._a | self._b
self._set_flags()
def _ora_c(self):
self._increment_pc()
self._a = self._a | self._c
self._set_flags()
def _ori(self):
self._increment_pc()
self._a = self._a | self._mem[self._pc]
self._increment_pc()
self._set_flags()
def _out(self):
self._increment_pc()
sys.stdout.write("%s" % self._a)
self._increment_pc()
def _ral(self):
# TODO: IMPLEMENT
self._increment_pc()
def _rar(self):
# TODO: IMPLEMENT
self._increment_pc()
    def _ret(self):
        # rebuild the return address saved by _call: low byte at 0xfffe, high byte at 0xffff
        self._pc = (self._mem[0xffff] << 8) | self._mem[0xfffe]
def _sta(self):
self._increment_pc()
lsb = self._mem[self._pc]
self._increment_pc()
msb = self._mem[self._pc] << 8
self._increment_pc()
self._mem[msb + lsb] = self._a
def _sub_b(self):
self._increment_pc()
self._a = self._a - self._b
self._set_flags()
def _sub_c(self):
self._increment_pc()
self._a = self._a - self._c
self._set_flags()
def _xra_b(self):
self._increment_pc()
self._a = self._a ^ self._b
self._set_flags()
def _xra_c(self):
self._increment_pc()
self._a = self._a ^ self._c
self._set_flags()
def _xri(self):
self._increment_pc()
self._a = self._a ^ self._mem[self._pc]
self._increment_pc()
self._set_flags()
class SegfaultError(Exception):
def __init__(self, emulator: SAP2Emulator):
super(SegfaultError, self).__init__(emulator)
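# Minimal usage sketch (illustrative; the opcodes below come from the
# instruction table above): MVI A,0x02; MVI B,0x03; ADD B; OUT; HLT.
if __name__ == "__main__":
    demo = SAP2Emulator()
    demo.load(0x0000, bytes([
        0x3e, 0x02,  # MVI A, 0x02
        0x06, 0x03,  # MVI B, 0x03
        0x80,        # ADD B        -> A = 5
        0xd3, 0x00,  # OUT (the port byte is skipped by this emulator)
        0x76,        # HLT
    ]))
    demo.start()     # writes "5" to stdout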
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the Test engine."""
import collections
import copy
import jsonschema
import mock
from rally import consts
from rally import exceptions
from rally.task import engine
from tests.unit import fakes
from tests.unit import test
class TestException(exceptions.RallyException):
msg_fmt = "TestException"
class TaskEngineTestCase(test.TestCase):
@mock.patch("rally.task.engine.TaskConfig")
def test_init(self, mock_task_config):
config = mock.MagicMock()
task = mock.MagicMock()
mock_task_config.return_value = fake_task_instance = mock.MagicMock()
eng = engine.TaskEngine(config, task)
mock_task_config.assert_has_calls([mock.call(config)])
self.assertEqual(eng.config, fake_task_instance)
self.assertEqual(eng.task, task)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("jsonschema.validate")
def test_validate(self, mock_validate, mock_task_config):
mock_task_config.return_value = config = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
mock_validate = mock.MagicMock()
eng._validate_config_scenarios_name = mock_validate.names
eng._validate_config_syntax = mock_validate.syntax
eng._validate_config_semantic = mock_validate.semantic
eng.validate()
expected_calls = [
mock.call.names(config),
mock.call.syntax(config),
mock.call.semantic(config)
]
mock_validate.assert_has_calls(expected_calls)
def test_validate__wrong_schema(self):
config = {
"wrong": True
}
task = mock.MagicMock()
self.assertRaises(exceptions.InvalidTaskException,
engine.TaskEngine, config, task)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_scenarios_name(self, mock_task_config):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock(
side_effect=exceptions.NotFoundScenarios)
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_syntax(self, mock_task_config):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock()
eng._validate_config_syntax = mock.MagicMock(
side_effect=exceptions.InvalidTaskConfig)
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_semantic(self, mock_task_config):
task = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock()
eng._validate_config_syntax = mock.MagicMock()
eng._validate_config_semantic = mock.MagicMock(
side_effect=exceptions.InvalidTaskConfig)
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get_all")
def test__validate_config_scenarios_name(
self, mock_scenario_get_all, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "a"},
{"name": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
mock_scenario_get_all.return_value = [
mock.MagicMock(get_name=lambda: "e"),
mock.MagicMock(get_name=lambda: "b"),
mock.MagicMock(get_name=lambda: "a")
]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_scenarios_name(mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario")
    def test__validate_config_scenarios_name_non_existing(
self, mock_scenario, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "exist"},
{"name": "nonexist1"},
{"name": "nonexist2"}
]
mock_task_instance.subtasks = [mock_subtask]
mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
self.assertRaises(exceptions.NotFoundScenarios,
eng._validate_config_scenarios_name,
mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.ContextManager.validate")
def test__validate_config_syntax(
self, mock_context_manager_validate,
mock_scenario_runner_validate,
mock_task_config
):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_syntax(mock_task_instance)
mock_scenario_runner_validate.assert_has_calls(
[mock.call({}), mock.call("b")], any_order=True)
mock_context_manager_validate.assert_has_calls(
[mock.call("a", non_hidden=True), mock.call({}, non_hidden=True)],
any_order=True)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.validate")
def test__validate_config_syntax__wrong_runner(
self, mock_context_manager_validate,
mock_scenario_runner, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
mock_scenario_runner.validate = mock.MagicMock(
side_effect=jsonschema.ValidationError("a"))
self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.ContextManager")
def test__validate_config_syntax__wrong_context(
self, mock_context_manager, mock_scenario_runner_validate,
mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
mock_context_manager.validate = mock.MagicMock(
side_effect=jsonschema.ValidationError("a"))
self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_syntax, mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.validate")
def test__validate_config_semantic_helper(self, mock_scenario_validate,
mock_task_config):
deployment = mock.MagicMock()
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_semantic_helper("admin", "user", "name", "pos",
deployment, {"args": "args"})
mock_scenario_validate.assert_called_once_with(
"name", {"args": "args"}, admin="admin", users=["user"],
deployment=deployment)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.validate",
side_effect=exceptions.InvalidScenarioArgument)
    def test__validate_config_semantic_helper_invalid_arg(
self, mock_scenario_validate, mock_task_config):
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
self.assertRaises(exceptions.InvalidTaskConfig,
eng._validate_config_semantic_helper, "a", "u", "n",
"p", mock.MagicMock(), {})
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.existing_users.ExistingUsers")
def test_get_user_ctx_for_validation_existing_users(
self, mock_existing_users, mock_task_config):
context = {"a": 10}
users = [mock.MagicMock(), mock.MagicMock()]
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock(),
users=users)
result = eng._get_user_ctx_for_validation(context)
self.assertEqual(context["config"]["existing_users"], users)
mock_existing_users.assert_called_once_with(context)
self.assertEqual(mock_existing_users.return_value, result)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.osclients.Clients")
@mock.patch("rally.task.engine.users_ctx")
@mock.patch("rally.task.engine.TaskEngine"
"._validate_config_semantic_helper")
@mock.patch("rally.task.engine.objects.Deployment.get",
return_value="FakeDeployment")
def test__validate_config_semantic(
self, mock_deployment_get,
mock__validate_config_semantic_helper,
mock_users_ctx, mock_clients, mock_task_config):
mock_users_ctx.UserGenerator = fakes.FakeUserContext
mock_clients.return_value = mock.MagicMock()
mock_task_instance = mock.MagicMock()
mock_subtask1 = mock.MagicMock()
mock_subtask1.scenarios = [
{"name": "a", "kw": 0},
{"name": "a", "kw": 1}
]
mock_subtask2 = mock.MagicMock()
mock_subtask2.scenarios = [
{"name": "b", "kw": 0},
]
mock_task_instance.subtasks = [mock_subtask1, mock_subtask2]
fake_task = mock.MagicMock()
eng = engine.TaskEngine(mock_task_instance, fake_task)
eng.admin = "admin"
eng._validate_config_semantic(mock_task_instance)
expected_calls = [
mock.call("admin"),
mock.call(fakes.FakeUserContext.user["endpoint"])
]
mock_clients.assert_has_calls(expected_calls)
mock_deployment_get.assert_called_once_with(fake_task["uuid"])
admin = user = mock_clients.return_value
fake_deployment = mock_deployment_get.return_value
expected_calls = [
mock.call(admin, user, "a", 0, fake_deployment,
{"name": "a", "kw": 0}),
mock.call(admin, user, "a", 1, fake_deployment,
{"name": "a", "kw": 1}),
mock.call(admin, user, "b", 0, fake_deployment,
{"name": "b", "kw": 0})
]
mock__validate_config_semantic_helper.assert_has_calls(
expected_calls, any_order=True)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__update_status(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_config, mock_task_get_status):
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTING
eng = engine.TaskEngine(mock.MagicMock(), task)
eng.run()
task.update_status.assert_has_calls([
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.FINISHED)
])
@mock.patch("rally.task.engine.objects.task.Task.get_status")
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run_exception_is_logged(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario, mock_result_consumer,
mock_log, mock_task_config, mock_task_get_status):
mock_context_manager_setup.side_effect = Exception
mock_result_consumer.is_task_in_aborting_status.return_value = False
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "a.task", "context": {"context_a": {"a": 1}}},
{"name": "b.task", "context": {"context_b": {"b": 2}}}
]
mock_task_instance.subtasks = [mock_subtask]
mock_task_config.return_value = mock_task_instance
eng = engine.TaskEngine(mock.MagicMock(), mock.MagicMock())
eng.run()
self.assertEqual(2, mock_log.exception.call_count)
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_soft_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer):
task = mock.MagicMock()
mock_result_consumer.is_task_in_aborting_status.side_effect = [False,
False,
True]
config = {
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
}
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task)
eng.run()
self.assertEqual(2, fake_runner.run.call_count)
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
def test_run__task_aborted(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
task = mock.MagicMock()
config = {
"a.task": [{"runner": {"type": "a", "b": 1}}],
"b.task": [{"runner": {"type": "a", "b": 1}}],
"c.task": [{"runner": {"type": "a", "b": 1}}]
}
fake_runner_cls = mock.MagicMock()
fake_runner = mock.MagicMock()
fake_runner_cls.return_value = fake_runner
mock_task_get_status.return_value = consts.TaskStatus.SOFT_ABORTING
mock_scenario_runner.get.return_value = fake_runner_cls
eng = engine.TaskEngine(config, task)
eng.run()
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context(self, mock_scenario_get, mock_task_config):
default_context = {"a": 1, "b": 2}
mock_scenario_get.return_value._meta_get.return_value = default_context
task = mock.MagicMock()
name = "a.task"
context = {"b": 3, "c": 4}
endpoint = mock.MagicMock()
config = {
"a.task": [{"context": {"context_a": {"a": 1}}}],
}
eng = engine.TaskEngine(config, task)
result = eng._prepare_context(context, name, endpoint)
expected_context = copy.deepcopy(default_context)
expected_context.setdefault("users", {})
expected_context.update(context)
expected_result = {
"task": task,
"admin": {"endpoint": endpoint},
"scenario_name": name,
"config": expected_context
}
self.assertEqual(result, expected_result)
mock_scenario_get.assert_called_once_with(name)
mock_scenario_get.return_value._meta_get.assert_called_once_with(
"default_context"
)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context_with_existing_users(self, mock_scenario_get,
mock_task_config):
mock_scenario_get.return_value._meta_get.return_value = {}
task = mock.MagicMock()
name = "a.task"
context = {"b": 3, "c": 4}
endpoint = mock.MagicMock()
config = {
"a.task": [{"context": {"context_a": {"a": 1}}}],
}
existing_users = [mock.MagicMock()]
eng = engine.TaskEngine(config, task, users=existing_users)
result = eng._prepare_context(context, name, endpoint)
expected_context = {"existing_users": existing_users}
expected_context.update(context)
expected_result = {
"task": task,
"admin": {"endpoint": endpoint},
"scenario_name": name,
"config": expected_context
}
self.assertEqual(result, expected_result)
mock_scenario_get.assert_called_once_with(name)
mock_scenario_get.return_value._meta_get.assert_called_once_with(
"default_context")
class ResultConsumerTestCase(test.TestCase):
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
runner = mock.MagicMock()
results = [
{"duration": 1, "timestamp": 3},
{"duration": 2, "timestamp": 2}
]
runner.result_queue = collections.deque(results)
with engine.ResultConsumer(
key, task, runner, False) as consumer_obj:
pass
self.assertEqual(list(map(mock.call, results)),
mock_sla_instance.add_iteration.mock_calls)
self.assertEqual(sorted(results, key=lambda x: x["timestamp"]),
consumer_obj.results)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer.wait_and_abort")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_abort(
self, mock_sla_checker, mock_result_consumer_wait_and_abort,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[{"duration": 1, "timestamp": 1}] * 4)
with engine.ResultConsumer(key, task, runner, True):
pass
self.assertTrue(runner.abort.called)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_abort_manually(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status):
runner = mock.MagicMock(result_queue=False)
is_done = mock.MagicMock()
is_done.isSet.side_effect = (False, True)
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTED
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
eng = engine.TaskEngine({}, task)
eng.duration = 123
eng.full_duration = 456
with engine.ResultConsumer(key, task, runner, True):
pass
mock_sla_checker.assert_called_once_with(key["kw"])
mocked_set_aborted = mock_sla_checker.return_value.set_aborted_manually
mocked_set_aborted.assert_called_once_with()
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.sla.SLAChecker")
def test_consume_results_sla_failure_continue(self, mock_sla_checker,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
mock_task_get_status.return_value = consts.TaskStatus.FAILED
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
runner = mock.MagicMock()
runner.result_queue = collections.deque(
[{"duration": 1, "timestamp": 4}] * 4)
with engine.ResultConsumer(key, task, runner, False):
pass
self.assertEqual(0, runner.abort.call_count)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.task.sla.SLAChecker")
    def test_consume_results_with_unexpected_failure(self, mock_sla_checker,
mock_event, mock_thread,
mock_task_get_status):
mock_sla_instance = mock.MagicMock()
mock_sla_checker.return_value = mock_sla_instance
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
runner = mock.MagicMock()
runner.result_queue = collections.deque([1])
exc = TestException()
try:
with engine.ResultConsumer(key, task, runner, False):
raise exc
except TestException:
pass
mock_sla_instance.set_unexpected_failure.assert_has_calls(
[mock.call(exc)])
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
@mock.patch("rally.task.engine.TaskEngine._get_runner")
def test_wait_and_abort_on_abort(
self, mock_task_engine__get_runner,
mock_sleep, mock_task_engine__prepare_context,
mock_task_get_status, mock_event, mock_thread):
runner = mock.MagicMock()
key = mock.MagicMock()
task = mock.MagicMock()
mock_task_get_status.side_effect = (consts.TaskStatus.RUNNING,
consts.TaskStatus.RUNNING,
consts.TaskStatus.ABORTING)
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
mock_is_done.isSet.return_value = False
res = engine.ResultConsumer(key, task, runner, True)
res.wait_and_abort()
runner.abort.assert_called_with()
        # task.get_status is polled repeatedly while is_done is not set
self.assertEqual(3, mock_task_get_status.call_count)
@mock.patch("rally.task.engine.threading.Thread")
@mock.patch("rally.task.engine.threading.Event")
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskEngine._prepare_context")
@mock.patch("rally.task.engine.time.sleep")
@mock.patch("rally.task.engine.TaskEngine._get_runner")
def test_wait_and_abort_on_no_abort(
self, mock_task_engine__get_runner, mock_sleep,
mock_task_engine__prepare_context, mock_task_get_status,
mock_event, mock_thread):
runner = mock.MagicMock()
key = mock.MagicMock()
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
mock_is_done = mock.MagicMock()
mock_event.return_value = mock_is_done
mock_is_done.isSet.side_effect = [False, False, False, False, True]
res = engine.ResultConsumer(key, task, runner, True)
res.wait_and_abort()
        # check that the method does not abort the runner if the task is not aborted
self.assertFalse(runner.abort.called)
        # task.get_status is polled repeatedly while is_done is not set
self.assertEqual(4, mock_task_get_status.call_count)
class TaskTestCase(test.TestCase):
@mock.patch("jsonschema.validate")
def test_validate_json(self, mock_validate):
config = {}
engine.TaskConfig(config)
mock_validate.assert_has_calls([
mock.call(config, engine.TaskConfig.CONFIG_SCHEMA_V1)])
@mock.patch("jsonschema.validate")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_json_v2(self, mock_task_config__make_subtasks,
mock_validate):
config = {"version": 2}
engine.TaskConfig(config)
mock_validate.assert_has_calls([
mock.call(config, engine.TaskConfig.CONFIG_SCHEMA_V2)])
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_version(self, mock_task_config__make_subtasks,
mock_task_config__validate_json,
mock_task_config__get_version):
mock_task_config__get_version.return_value = 1
engine.TaskConfig(mock.MagicMock())
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_version_wrong_version(
self, mock_task_config__make_subtasks,
mock_task_config__validate_json,
mock_task_config__get_version):
mock_task_config__get_version.return_value = "wrong"
self.assertRaises(exceptions.InvalidTaskException, engine.TaskConfig,
mock.MagicMock)
@mock.patch("rally.task.engine.SubTask")
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
def test_make_subtasks_v1(self, mock_task_config__validate_json,
mock_task_config__get_version, mock_sub_task):
mock_task_config__get_version.return_value = 1
config = {"a.task": [{"s": 1}, {"s": 2}],
"b.task": [{"s": 3}]}
self.assertEqual(3, len(engine.TaskConfig(config).subtasks))
mock_sub_task.assert_has_calls([
mock.call({
"title": "a.task",
"scenarios": [{"s": 1, "name": "a.task"}]
}),
mock.call({
"title": "a.task",
"scenarios": [{"s": 2, "name": "a.task"}]
}),
mock.call({
"title": "b.task",
"scenarios": [{"s": 3, "name": "b.task"}]
})
], any_order=True)
@mock.patch("rally.task.engine.SubTask")
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
def test_make_subtasks_v2(self, mock_task_config__validate_json,
mock_task_config__get_version, mock_sub_task):
mock_task_config__get_version.return_value = 2
subtask_conf1 = mock.MagicMock()
subtask_conf2 = mock.MagicMock()
config = {"subtasks": [subtask_conf1, subtask_conf2]}
self.assertEqual(2, len(engine.TaskConfig(config).subtasks))
mock_sub_task.assert_has_calls([
mock.call(subtask_conf1),
mock.call(subtask_conf2)])
|
|
"""
A simple workflow engine
"""
import logging
from collections import defaultdict
from datetime import datetime
from string import Template
from .task import Task
class ExitWorkflow(Exception):
"""Exit the execution of a workflow earlier"""
# Special exit status to be used by spawned programs to stop workflow
# not in sysexits.h, just to say that we require the process to stop
EXIT_STOPPED = 90
# EX_SOFTWARE according to /usr/include/sysexits.h
EXIT_FAILED = 70
# EX_TEMPFAIL according to /usr/include/sysexits.h
EXIT_CANCELLED = 75
status_messages = {
EXIT_STOPPED: "EXIT: STOPPED",
EXIT_FAILED: "EXIT: FAILED",
EXIT_CANCELLED: "EXIT: CANCELLED"
}
def __init__(self, task, status, message=None):
self.task = task
self.status = status
Exception.__init__(self, message)
@classmethod
def is_status(cls, status):
return status in cls.status_messages
def get_exit_message(self):
return self.status_messages.get(self.status, "")
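    # Illustrative use (not from the original module): a task can stop the
    # whole workflow by raising this exception with one of the statuses above,
    # e.g.
    #
    #     raise ExitWorkflow(task, ExitWorkflow.EXIT_CANCELLED, "input missing")
    #
    # Workflow.execute() then logs "Workflow cancelled: ..." without failing;
    # EXIT_FAILED and unknown statuses are re-raised.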
class Workflow(Task):
"""Control the execution of a workflow object"""
starttask = None
include_tasks = ()
exclude_tasks = ()
settings = ()
def __init__(self, **kwargs):
params = kwargs.pop('params', {})
Task.__init__(self, **kwargs)
self.settings = dict(self.settings, **params)
self.tasks = list(self._tasks())
def _tasks(self):
tasks = walk(self.starttask)
exclude = self.exclude_tasks or ()
include = self.include_tasks or ()
for i, task in enumerate(tasks):
taskid = task.__class__.__name__
if include:
skipped = (taskid not in include) and (str(i) not in include)
elif exclude:
skipped = (taskid in exclude) or (str(i) in exclude)
else:
skipped = False
yield i, task, skipped
def _execute(self):
esettings = _tsettings(self.settings)
for i, task, skipped in self.tasks:
if skipped:
self.log('Task skipped: %i-%s', i, task)
continue
_texpand(task, esettings)
starttime = datetime.now()
self.log('Task started: %i-%s', i, task)
try:
task.execute()
except Exception:
self.log('Task failed: %i-%s in %s', i, task, \
datetime.now() - starttime, level=logging.ERROR)
raise
else:
                self.log('Task succeeded: %i-%s in %s', i, task, \
datetime.now() - starttime)
def execute(self):
starttime = datetime.now()
self.log('Workflow started')
try:
self._execute()
except ExitWorkflow, exc:
tmsg = "Task %s stopped the workflow with exit status '%s' in %s"
msg = tmsg % (exc.task, exc.get_exit_message(),
datetime.now() - starttime)
if exc.status == ExitWorkflow.EXIT_STOPPED:
self.log('Workflow stopped: %s' % msg)
elif exc.status == ExitWorkflow.EXIT_CANCELLED:
self.log('Workflow cancelled: %s' % msg)
elif exc.status == ExitWorkflow.EXIT_FAILED:
self.log('Workflow failed: %s' % msg)
raise
else:
raise
except Exception:
self.log('Workflow failed in %s', \
datetime.now() - starttime, level=logging.ERROR)
raise
else:
            self.log('Workflow succeeded in %s', datetime.now() - starttime)
def walk(starttask):
"""Walk starttask and build ordered list of subtasks to execute
>>> t1 = Task(taskname='T1')
>>> t2 = Task(taskname='T2', deps=[t1])
>>> t3 = Task(taskname='T3', deps=[t2])
>>> t4 = Task(taskname='T4', deps=[t3, t1])
>>> [t.taskname for t in walk(t4)]
['T1', 'T2', 'T3', 'T4']
"""
clsmap = {}
def _n(task):
if type(task) is type:
            # task referenced by its class is instantiated once
task = clsmap.get(task) or clsmap.setdefault(task, task())
assert isinstance(task, Task), \
'Require a Task instance, got %s' % type(task)
return task
def _dfs(task, ttl=500):
assert ttl, 'DFS reached depth limit, check for cyclic dependencies'
yield task
for dep in map(_n, task.deps):
yield dep
for subdep in _dfs(dep, ttl-1):
yield subdep
seen = set()
for t in reversed(list(_dfs(_n(starttask)))):
if t not in seen:
seen.add(t)
yield t
def find_redundant_deps(starttask):
    """Returns a list of tuples of the form (task, dep, seen_in) where:
    * (task, dep) is the dependency to remove
    * seen_in is a list of the linked tasks where the dependency also occurs
Doctest:
>>> class Task0(Task):
... pass
>>> class Task1(Task):
... deps = [Task0]
>>> class Task2(Task):
... deps = [Task0, Task1]
>>> class Task3(Task):
... deps = [Task0, Task2]
>>> list(find_redundant_deps(Task3))
[(('Task2', 'Task0'), ['Task1']), (('Task3', 'Task0'), ['Task1', 'Task2'])]
"""
seen = defaultdict(set)
for task in walk(starttask):
tid = taskid(task)
taskpath = [taskid(s) for s in walk(task)]
for dep in task.deps:
depid = taskid(dep)
seenin = [t for t in taskpath if t in seen[depid]]
if seenin:
yield (tid, depid), seenin
seen[depid].add(tid)
def taskid(task):
"""Returns the task id"""
return task.__name__ if type(task) is type else task.__class__.__name__
def _texpand(task, settings):
"""Expand templates found in task attributes
>>> t = Task(output='$path/$leaf', leaf='useless', path=['$prefix/path'])
>>> settings = dict(prefix='/tmp', path='/tmp/path/tmp')
>>> _texpand(t, settings)
>>> t.output
'/tmp/path/tmp/$leaf'
>>> t.path
['/tmp/path']
>>> settings['leaf'] = 'dir'
>>> _texpand(t, settings)
>>> t.output
'/tmp/path/tmp/dir'
"""
for attr in dir(task):
if attr.startswith("_"):
continue
v = getattr(task, attr)
if not callable(v):
nv = _titem(v, settings)
try:
setattr(task, attr, nv)
except AttributeError:
# it may happen that the attribute is a property
pass
def _tsettings(settings):
"""Returns expanded settings
Expand same template used twice in same value
>>> _tsettings(dict(prefix='/tmp', path='$prefix/path$prefix'))
{'path': '/tmp/path/tmp', 'prefix': '/tmp'}
Looping expansion must fail
>>> try:
... _tsettings(dict(loopvar='$loopvar'))
... except AssertionError, ex:
... if 'Recursive value found' not in str(ex):
... raise
... else:
... raise AssertionError('loopvar expansion ignored')
>>> try:
... _tsettings(dict(var1='$var2', var2='$var1'))
... except AssertionError, ex:
... if 'Recursive value found' not in str(ex):
... raise
... else:
... raise AssertionError('loopvar expansion ignored')
"""
tvars = dict(settings)
modified = True
while modified:
modified = False
for k, v in tvars.items():
if isinstance(v, basestring) and '$' in v:
assert k not in v or v == _tsub(v, {k: ''}), \
"Recursive value found during expansion: %r" % v
nv = _tsub(v, tvars)
if nv != v:
tvars[k] = nv
modified = True
return tvars
def _titem(v, tvars):
"""Replace templates
>>> tvars = dict(a=1, b=2)
>>> _titem('$a/$b', tvars)
'1/2'
>>> _titem(['$a', '$b'], tvars)
['1', '2']
>>> _titem(dict(r='$a', c=2), tvars)
{'c': 2, 'r': '1'}
>>> _titem(['$a', 1, dict(r='$a', c=2), '$b'], tvars)
['1', 1, {'c': 2, 'r': '1'}, '2']
>>> _titem(('$a', 1, '$b'), tvars)
('1', 1, '2')
>>> deps = [object(), object()]
>>> _titem(deps, tvars) == deps
True
"""
if isinstance(v, list):
return [_titem(v, tvars) for v in v]
elif isinstance(v, tuple):
return tuple(_titem(v, tvars) for v in v)
elif isinstance(v, dict):
return dict((k, _titem(v, tvars)) for k, v in v.iteritems())
elif isinstance(v, basestring) and '$' in v:
return _tsub(v, tvars)
else:
return v
def _tsub(tmpl, *args, **kwargs):
return Template(tmpl).safe_substitute(*args, **kwargs)
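# Usage sketch (illustrative only; Fetch, Report and Pipeline are hypothetical
# and assume Task subclasses expose execute(), which is all _execute() calls):
#
#     class Fetch(Task):
#         output = '$workdir/data.csv'
#
#     class Report(Task):
#         deps = [Fetch]
#
#     class Pipeline(Workflow):
#         starttask = Report
#         settings = {'workdir': '/tmp/pipeline'}
#
#     Pipeline(params={'workdir': '/srv/pipeline'}).execute()
#
# walk() instantiates each class-referenced dependency once, _tsettings()
# expands '$workdir' from settings/params, and _texpand() rewrites Fetch.output
# before each task runs.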
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import StringIO
import sys
import mock
from oslo_config import cfg
import rtslib
from cinder.cmd import all as cinder_all
from cinder.cmd import api as cinder_api
from cinder.cmd import backup as cinder_backup
from cinder.cmd import manage as cinder_manage
from cinder.cmd import rtstool as cinder_rtstool
from cinder.cmd import scheduler as cinder_scheduler
from cinder.cmd import volume as cinder_volume
from cinder.cmd import volume_usage_audit
from cinder import context
from cinder import test
from cinder import version
CONF = cfg.CONF
class TestCinderApiCmd(test.TestCase):
"""Unit test cases for python modules under cinder/cmd."""
def setUp(self):
super(TestCinderApiCmd, self).setUp()
sys.argv = ['cinder-api']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderApiCmd, self).tearDown()
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.setup')
def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher,
wsgi_service):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
cinder_api.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
monkey_patch.assert_called_once_with()
rpc_init.assert_called_once_with(CONF)
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_called_once_with(server,
workers=server.workers)
launcher.wait.assert_called_once_with()
class TestCinderBackupCmd(test.TestCase):
def setUp(self):
super(TestCinderBackupCmd, self).setUp()
sys.argv = ['cinder-backup']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderBackupCmd, self).tearDown()
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.setup')
def test_main(self, log_setup, monkey_patch, service_create, service_serve,
service_wait):
server = service_create.return_value
cinder_backup.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-backup')
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderAllCmd(test.TestCase):
def setUp(self):
super(TestCinderAllCmd, self).setUp()
sys.argv = ['cinder-all']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderAllCmd, self).tearDown()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
def test_main(self, log_setup, get_logger, monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
service = service_create.return_value
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server, workers=server.workers)
service_create.assert_has_calls([mock.call(binary='cinder-volume'),
mock.call(binary='cinder-scheduler'),
mock.call(binary='cinder-backup')])
self.assertEqual(3, service_create.call_count)
launcher.launch_service.assert_has_calls([mock.call(service)] * 3)
self.assertEqual(4, launcher.launch_service.call_count)
launcher.wait.assert_called_once_with()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
def test_main_load_osapi_volume_exception(self, log_setup, get_logger,
monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
mock_log = get_logger.return_value
for ex in (Exception(), SystemExit()):
launcher.launch_service.side_effect = ex
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server,
workers=server.workers)
self.assertTrue(mock_log.exception.called)
# Reset for the next exception
log_setup.reset_mock()
get_logger.reset_mock()
monkey_patch.reset_mock()
process_launcher.reset_mock()
wsgi_service.reset_mock()
mock_log.reset_mock()
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.service.WSGIService')
@mock.patch('cinder.service.process_launcher')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
def test_main_load_binary_exception(self, log_setup, get_logger,
monkey_patch, process_launcher,
wsgi_service, service_create):
launcher = process_launcher.return_value
server = wsgi_service.return_value
server.workers = mock.sentinel.worker_count
service = service_create.return_value
mock_log = get_logger.return_value
def launch_service(*args, **kwargs):
if service in args:
raise Exception()
launcher.launch_service.side_effect = launch_service
cinder_all.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder.all')
monkey_patch.assert_called_once_with()
process_launcher.assert_called_once_with()
wsgi_service.assert_called_once_with('osapi_volume')
launcher.launch_service.assert_any_call(server,
workers=server.workers)
for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
service_create.assert_any_call(binary=binary)
launcher.launch_service.assert_called_with(service)
self.assertTrue(mock_log.exception.called)
class TestCinderSchedulerCmd(test.TestCase):
def setUp(self):
super(TestCinderSchedulerCmd, self).setUp()
sys.argv = ['cinder-scheduler']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderSchedulerCmd, self).tearDown()
@mock.patch('cinder.service.wait')
@mock.patch('cinder.service.serve')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
service_serve, service_wait):
server = service_create.return_value
cinder_scheduler.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
monkey_patch.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-scheduler')
service_serve.assert_called_once_with(server)
service_wait.assert_called_once_with()
class TestCinderVolumeCmd(test.TestCase):
def setUp(self):
super(TestCinderVolumeCmd, self).setUp()
sys.argv = ['cinder-volume']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderVolumeCmd, self).tearDown()
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.setup')
def test_main(self, log_setup, monkey_patch, service_create,
get_launcher):
CONF.set_override('enabled_backends', None)
launcher = get_launcher.return_value
server = service_create.return_value
cinder_volume.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
monkey_patch.assert_called_once_with()
get_launcher.assert_called_once_with()
service_create.assert_called_once_with(binary='cinder-volume')
launcher.launch_service.assert_called_once_with(server)
launcher.wait.assert_called_once_with()
@mock.patch('cinder.service.get_launcher')
@mock.patch('cinder.service.Service.create')
@mock.patch('cinder.utils.monkey_patch')
@mock.patch('cinder.openstack.common.log.setup')
def test_main_with_backends(self, log_setup, monkey_patch, service_create,
get_launcher):
backends = ['backend1', 'backend2']
CONF.set_override('enabled_backends', backends)
launcher = get_launcher.return_value
cinder_volume.main()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
monkey_patch.assert_called_once_with()
get_launcher.assert_called_once_with()
self.assertEqual(len(backends), service_create.call_count)
self.assertEqual(len(backends), launcher.launch_service.call_count)
launcher.wait.assert_called_once_with()
class TestCinderManageCmd(test.TestCase):
def setUp(self):
super(TestCinderManageCmd, self).setUp()
sys.argv = ['cinder-manage']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderManageCmd, self).tearDown()
@mock.patch('cinder.openstack.common.uuidutils.is_uuid_like')
def test_param2id(self, is_uuid_like):
mock_object_id = mock.MagicMock()
is_uuid_like.return_value = True
object_id = cinder_manage.param2id(mock_object_id)
self.assertEqual(mock_object_id, object_id)
is_uuid_like.assert_called_once_with(mock_object_id)
@mock.patch('cinder.openstack.common.uuidutils.is_uuid_like')
def test_param2id_int_string(self, is_uuid_like):
object_id_str = '10'
is_uuid_like.return_value = False
object_id = cinder_manage.param2id(object_id_str)
self.assertEqual(10, object_id)
is_uuid_like.assert_called_once_with(object_id_str)
@mock.patch('cinder.db.migration.db_sync')
def test_db_commands_sync(self, db_sync):
version = mock.MagicMock()
db_cmds = cinder_manage.DbCommands()
db_cmds.sync(version=version)
db_sync.assert_called_once_with(version)
@mock.patch('oslo_db.sqlalchemy.migration.db_version')
def test_db_commands_version(self, db_version):
db_cmds = cinder_manage.DbCommands()
db_cmds.version()
self.assertEqual(1, db_version.call_count)
@mock.patch('cinder.version.version_string')
def test_versions_commands_list(self, version_string):
version_cmds = cinder_manage.VersionCommands()
version_cmds.list()
version_string.assert_called_once_with()
@mock.patch('cinder.version.version_string')
def test_versions_commands_call(self, version_string):
version_cmds = cinder_manage.VersionCommands()
version_cmds.__call__()
version_string.assert_called_once_with()
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list(self, get_admin_context, service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [{'host': 'fake-host',
'availability_zone': 'fake-az'}]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list()
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_host_commands_list_with_zone(self, get_admin_context,
service_get_all):
get_admin_context.return_value = mock.sentinel.ctxt
service_get_all.return_value = [{'host': 'fake-host',
'availability_zone': 'fake-az1'},
{'host': 'fake-host',
'availability_zone': 'fake-az2'}]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ("%(host)-25s\t%(zone)-15s\n" %
{'host': 'host', 'zone': 'zone'})
expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" %
{'host': 'fake-host',
'availability_zone': 'fake-az1'})
host_cmds = cinder_manage.HostCommands()
host_cmds.list(zone='fake-az1')
get_admin_context.assert_called_once_with()
service_get_all.assert_called_once_with(mock.sentinel.ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.rpc.get_client')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.rpc.initialized', return_value=False)
@mock.patch('oslo.messaging.Target')
def test_volume_commands_init(self, messaging_target, rpc_initialized,
rpc_init, get_client):
CONF.set_override('volume_topic', 'fake-topic')
mock_target = messaging_target.return_value
mock_rpc_client = get_client.return_value
volume_cmds = cinder_manage.VolumeCommands()
rpc_client = volume_cmds.rpc_client()
rpc_initialized.assert_called_once_with()
rpc_init.assert_called_once_with(CONF)
messaging_target.assert_called_once_with(topic='fake-topic')
get_client.assert_called_once_with(mock_target)
self.assertEqual(mock_rpc_client, rpc_client)
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.get_client')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete(self, rpc_init, get_client,
get_admin_context, volume_get):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
mock_client = mock.MagicMock()
cctxt = mock.MagicMock()
mock_client.prepare.return_value = cctxt
get_client.return_value = mock_client
volume_id = '123'
volume = {'id': volume_id, 'host': 'fake-host', 'status': 'available'}
volume_get.return_value = volume
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds._client = mock_client
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, 123)
mock_client.prepare.assert_called_once_with(server=volume['host'])
cctxt.cast.assert_called_once_with(ctxt, 'delete_volume',
volume_id=volume['id'])
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
volume_id = '123'
volume = {'id': volume_id, 'host': None, 'status': 'available'}
volume_get.return_value = volume
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ('Volume not yet assigned to host.\n'
'Deleting volume from database and skipping'
' rpc.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
get_admin_context.assert_called_once_with()
volume_get.assert_called_once_with(ctxt, 123)
volume_destroy.assert_called_once_with(ctxt, 123)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.db.volume_destroy')
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.context.get_admin_context')
@mock.patch('cinder.rpc.init')
def test_volume_commands_delete_volume_in_use(self, rpc_init,
get_admin_context,
volume_get, volume_destroy):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
volume_id = '123'
volume = {'id': volume_id, 'host': 'fake-host', 'status': 'in-use'}
volume_get.return_value = volume
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ('Volume is in-use.\n'
'Detach volume from instance and then try'
' again.\n')
volume_cmds = cinder_manage.VolumeCommands()
volume_cmds.delete(volume_id)
volume_get.assert_called_once_with(ctxt, 123)
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
expected_out = ''
for key, value in CONF.iteritems():
expected_out += '%s = %s' % (key, value) + '\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
def test_config_commands_list_param(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
CONF.set_override('host', 'fake')
expected_out = 'host = fake\n'
config_cmds = cinder_manage.ConfigCommands()
config_cmds.list(param='host')
self.assertEqual(expected_out, fake_out.getvalue())
def test_get_log_commands_no_errors(self):
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
CONF.set_override('log_dir', None)
expected_out = 'No errors in logfiles!\n'
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('__builtin__.open')
@mock.patch('os.listdir')
def test_get_log_commands_errors(self, listdir, open):
CONF.set_override('log_dir', 'fake-dir')
listdir.return_value = ['fake-error.log']
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
open.return_value = StringIO.StringIO(
'[ ERROR ] fake-error-message')
expected_out = ('fake-dir/fake-error.log:-\n'
'Line 1 : [ ERROR ] fake-error-message\n')
get_log_cmds = cinder_manage.GetLogCommands()
get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
open.assert_called_once_with('fake-dir/fake-error.log', 'r')
listdir.assert_called_once_with(CONF.log_dir)
@mock.patch('__builtin__.open')
@mock.patch('os.path.exists')
def test_get_log_commands_syslog_no_log_file(self, path_exists, open):
path_exists.return_value = False
get_log_cmds = cinder_manage.GetLogCommands()
exit = self.assertRaises(SystemExit, get_log_cmds.syslog)
self.assertEqual(exit.code, 1)
path_exists.assert_any_call('/var/log/syslog')
path_exists.assert_any_call('/var/log/messages')
@mock.patch('cinder.db.backup_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_backup_commands_list(self, get_admin_context, backup_get_all):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
backup = {'id': 1,
'user_id': 'fake-user-id',
'project_id': 'fake-project-id',
'host': 'fake-host',
'display_name': 'fake-display-name',
'container': 'fake-container',
'status': 'fake-status',
'size': 123,
'object_count': 1}
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
'\t%-12s')
header = hdr % ('ID',
'User ID',
'Project ID',
'Host',
'Name',
'Container',
'Status',
'Size',
'Object Count')
res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d'
'\t%-12s')
resource = res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
1)
expected_out = header + '\n' + resource + '\n'
backup_cmds = cinder_manage.BackupCommands()
backup_cmds.list()
get_admin_context.assert_called_once_with()
backup_get_all.assert_called_once_with(ctxt)
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.utils.service_is_up')
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.context.get_admin_context')
def test_service_commands_list(self, get_admin_context, service_get_all,
service_is_up):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
service = {'binary': 'cinder-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': '2014-06-30 11:22:33',
'disabled': False}
service_get_all.return_value = [service]
service_is_up.return_value = True
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print_format = format % ('Binary',
'Host',
'Zone',
'Status',
'State',
'Updated At')
service_format = format % (service['binary'],
service['host'].partition('.')[0],
service['availability_zone'],
'enabled',
':-)',
service['updated_at'])
expected_out = print_format + '\n' + service_format + '\n'
service_cmds = cinder_manage.ServiceCommands()
service_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_context.assert_called_with()
service_get_all.assert_called_with(ctxt)
service_is_up.assert_called_with(service)
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_argv_lt_2(self, register_cli_opt):
script_name = 'cinder-manage'
sys.argv = [script_name]
CONF(sys.argv[1:], project='cinder', version=version.version_string())
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_sudo_failed(self, register_cli_opt, log_setup,
config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'fake_category', 'fake_action']
config_opts_call.side_effect = cfg.ConfigFilesNotFoundError(
mock.sentinel._namespace)
exit = self.assertRaises(SystemExit, cinder_manage.main)
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder',
version=version.version_string())
self.assertFalse(log_setup.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main(self, register_cli_opt, config_opts_call):
script_name = 'cinder-manage'
sys.argv = [script_name, 'config', 'list']
action_fn = mock.MagicMock()
CONF.category = mock.MagicMock(action_fn=action_fn)
cinder_manage.main()
self.assertTrue(register_cli_opt.called)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='cinder', version=version.version_string())
self.assertTrue(action_fn.called)
class TestCinderRtstoolCmd(test.TestCase):
def setUp(self):
super(TestCinderRtstoolCmd, self).setUp()
sys.argv = ['cinder-rtstool']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderRtstoolCmd, self).tearDown()
@mock.patch('rtslib.root.RTSRoot')
def test_create_rtsllib_error(self, rtsroot):
rtsroot.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError, cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password)
def _test_create_rtsllib_error_network_portal(self, ip):
with contextlib.nested(
mock.patch('rtslib.NetworkPortal'),
mock.patch('rtslib.LUN'),
mock.patch('rtslib.TPG'),
mock.patch('rtslib.FabricModule'),
mock.patch('rtslib.Target'),
mock.patch('rtslib.BlockStorageObject'),
mock.patch('rtslib.root.RTSRoot')
) as (network_portal, lun, tpg, fabric_module, target,
block_storage_object, rts_root):
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
if ip == '0.0.0.0':
network_portal.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError,
cinder_rtstool.create,
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password)
else:
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
network_portal.assert_any_call(tpg_new, ip, 3260,
mode='any')
if ip == '::0':
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
def test_create_rtsllib_error_network_portal_ipv4(self):
self._test_create_rtsllib_error_network_portal('0.0.0.0')
def test_create_rtsllib_error_network_portal_ipv6(self):
self._test_create_rtsllib_error_network_portal('::0')
def _test_create(self, ip):
with contextlib.nested(
mock.patch('rtslib.NetworkPortal'),
mock.patch('rtslib.LUN'),
mock.patch('rtslib.TPG'),
mock.patch('rtslib.FabricModule'),
mock.patch('rtslib.Target'),
mock.patch('rtslib.BlockStorageObject'),
mock.patch('rtslib.root.RTSRoot')
) as (network_portal, lun, tpg, fabric_module, target,
block_storage_object, rts_root):
root_new = mock.MagicMock(storage_objects=mock.MagicMock())
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
def network_portal_exception(*args, **kwargs):
if set([tpg_new, '::0', 3260]).issubset(list(args)):
raise rtslib.utils.RTSLibError()
else:
pass
cinder_rtstool.create(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password)
rts_root.assert_called_once_with()
block_storage_object.assert_called_once_with(
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
'1')
lun.assert_called_once_with(tpg_new,
storage_object=mock.sentinel.so_new)
self.assertEqual(1, tpg_new.enable)
network_portal.assert_any_call(tpg_new, ip, 3260,
mode='any')
if ip == '::0':
network_portal.assert_any_call(tpg_new, ip, 3260, mode='any')
def test_create_ipv4(self):
self._test_create('0.0.0.0')
def test_create_ipv6(self):
self._test_create('::0')
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_rtslib_error(self, rtsroot):
rtsroot.side_effect = rtslib.utils.RTSLibError()
self.assertRaises(rtslib.utils.RTSLibError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_rtstool_error(self, rtsroot):
rtsroot.targets.return_value = {}
self.assertRaises(cinder_rtstool.RtstoolError,
cinder_rtstool.add_initiator,
mock.sentinel.target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
@mock.patch('rtslib.MappedLUN')
@mock.patch('rtslib.NodeACL')
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator_acl_exists(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': mock.sentinel.initiator_iqn}]
acl = mock.MagicMock(node_wwn=mock.sentinel.initiator_iqn)
tpg = mock.MagicMock(node_acls=[acl])
tpgs = mock.MagicMock()
tpgs.next.return_value = tpg
target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
cinder_rtstool.add_initiator(target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
self.assertFalse(node_acl.called)
self.assertFalse(mapped_lun.called)
@mock.patch('rtslib.MappedLUN')
@mock.patch('rtslib.NodeACL')
@mock.patch('rtslib.root.RTSRoot')
def test_add_initiator(self, rtsroot, node_acl, mapped_lun):
target_iqn = mock.MagicMock()
target_iqn.tpgs.return_value = \
[{'node_acls': mock.sentinel.initiator_iqn}]
tpg = mock.MagicMock()
target = mock.MagicMock(tpgs=tpg, wwn=target_iqn)
rtsroot.return_value = mock.MagicMock(targets=[target])
acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid,
chap_password=mock.sentinel.password)
node_acl.return_value = acl_new
cinder_rtstool.add_initiator(target_iqn,
mock.sentinel.initiator_iqn,
mock.sentinel.userid,
mock.sentinel.password)
node_acl.assert_called_once_with(tpg.next(),
mock.sentinel.initiator_iqn,
mode='create')
mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0)
@mock.patch('rtslib.root.RTSRoot')
def test_get_targets(self, rtsroot):
target = mock.MagicMock()
target.dump.return_value = {'wwn': 'fake-wwn'}
rtsroot.return_value = mock.MagicMock(targets=[target])
with mock.patch('sys.stdout', new=StringIO.StringIO()) as fake_out:
cinder_rtstool.get_targets()
self.assertEqual(str(target.wwn), fake_out.getvalue().strip())
@mock.patch('rtslib.root.RTSRoot')
def test_delete(self, rtsroot):
target = mock.MagicMock(wwn=mock.sentinel.iqn)
storage_object = mock.MagicMock()
name = mock.PropertyMock(return_value=mock.sentinel.iqn)
type(storage_object).name = name
rtsroot.return_value = mock.MagicMock(
targets=[target], storage_objects=[storage_object])
cinder_rtstool.delete(mock.sentinel.iqn)
target.delete.assert_called_once_with()
storage_object.delete.assert_called_once_with()
def test_usage(self):
exit = self.assertRaises(SystemExit, cinder_rtstool.usage)
self.assertEqual(exit.code, 1)
@mock.patch('cinder.cmd.rtstool.usage')
def test_main_argc_lt_2(self, usage):
usage.side_effect = SystemExit(1)
sys.argv = ['cinder-rtstool']
        exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(exit.code, 1)
def test_main_create_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'create']
self._test_main_check_argv()
def test_main_create_argv_gt_7(self):
sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2',
'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6']
self._test_main_check_argv()
def test_main_add_initiator_argv_lt_6(self):
sys.argv = ['cinder-rtstool', 'add-initiator']
self._test_main_check_argv()
def test_main_delete_argv_lt_3(self):
sys.argv = ['cinder-rtstool', 'delete']
self._test_main_check_argv()
def test_main_no_action(self):
sys.argv = ['cinder-rtstool']
self._test_main_check_argv()
def _test_main_check_argv(self):
with mock.patch('cinder.cmd.rtstool.usage') as usage:
usage.side_effect = SystemExit(1)
            # sys.argv is set by each caller before invoking this helper.
exit = self.assertRaises(SystemExit, cinder_rtstool.main)
self.assertTrue(usage.called)
self.assertEqual(exit.code, 1)
def test_main_create(self):
with mock.patch('cinder.cmd.rtstool.create') as create:
sys.argv = ['cinder-rtstool',
'create',
mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.initiator_iqns]
rc = cinder_rtstool.main()
create.assert_called_once_with(mock.sentinel.backing_device,
mock.sentinel.name,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.initiator_iqns)
self.assertEqual(0, rc)
def test_main_add_initiator(self):
with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator:
sys.argv = ['cinder-rtstool',
'add-initiator',
mock.sentinel.target_iqn,
mock.sentinel.userid,
mock.sentinel.password,
mock.sentinel.initiator_iqns]
rc = cinder_rtstool.main()
add_initiator.assert_called_once_with(
mock.sentinel.target_iqn, mock.sentinel.initiator_iqns,
mock.sentinel.userid, mock.sentinel.password)
self.assertEqual(0, rc)
def test_main_get_targets(self):
with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets:
sys.argv = ['cinder-rtstool', 'get-targets']
rc = cinder_rtstool.main()
get_targets.assert_called_once_with()
self.assertEqual(0, rc)
def test_main_delete(self):
with mock.patch('cinder.cmd.rtstool.delete') as delete:
sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn]
rc = cinder_rtstool.main()
delete.assert_called_once_with(mock.sentinel.iqn)
self.assertEqual(0, rc)
def test_main_verify(self):
with mock.patch('cinder.cmd.rtstool.verify_rtslib') as verify_rtslib:
sys.argv = ['cinder-rtstool', 'verify']
rc = cinder_rtstool.main()
verify_rtslib.assert_called_once_with()
self.assertEqual(0, rc)
class TestCinderVolumeUsageAuditCmd(test.TestCase):
def setUp(self):
super(TestCinderVolumeUsageAuditCmd, self).setUp()
sys.argv = ['cinder-volume-usage-audit']
CONF(sys.argv[1:], project='cinder', version=version.version_string())
def tearDown(self):
super(TestCinderVolumeUsageAuditCmd, self).tearDown()
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_time_error(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period):
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2013-01-01 01:00:00')
last_completed_audit_period.return_value = (mock.sentinel.begin,
mock.sentinel.end)
exit = self.assertRaises(SystemExit, volume_usage_audit.main)
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder')
self.assertEqual(exit.code, -1)
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_create_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'create.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(ctxt, volume1, 'exists',
extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start', extra_usage_info=local_extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end', extra_usage_info=local_extra_info)
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_delete_volume_error(self, get_admin_context, log_setup,
get_logger, version_string,
rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
def _notify_about_volume_usage(*args, **kwargs):
if 'delete.end' in args:
raise Exception()
else:
pass
notify_about_volume_usage.side_effect = _notify_about_volume_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'exists', extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start',
extra_usage_info=local_extra_info_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end',
extra_usage_info=local_extra_info_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.start',
extra_usage_info=local_extra_info_delete)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.end',
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.db.snapshot_get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main_send_snapshot_error(self, get_admin_context,
log_setup, get_logger,
version_string, rpc_init,
last_completed_audit_period,
volume_get_active_by_window,
notify_about_volume_usage,
snapshot_get_active_by_window,
notify_about_snapshot_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
snapshot1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
volume_get_active_by_window.return_value = []
snapshot_get_active_by_window.return_value = [snapshot1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
local_extra_info_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
local_extra_info_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
def _notify_about_snapshot_usage(*args, **kwargs):
# notify_about_snapshot_usage raises an exception, but does not
# block
raise Exception()
notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
self.assertFalse(notify_about_volume_usage.called)
notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1, 'exists',
extra_info)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.start',
extra_usage_info=local_extra_info_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.start',
extra_usage_info=local_extra_info_delete)
@mock.patch('cinder.volume.utils.notify_about_snapshot_usage')
@mock.patch('cinder.db.snapshot_get_active_by_window')
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
@mock.patch('cinder.db.volume_get_active_by_window')
@mock.patch('cinder.utils.last_completed_audit_period')
@mock.patch('cinder.rpc.init')
@mock.patch('cinder.version.version_string')
@mock.patch('cinder.openstack.common.log.getLogger')
@mock.patch('cinder.openstack.common.log.setup')
@mock.patch('cinder.context.get_admin_context')
def test_main(self, get_admin_context, log_setup, get_logger,
version_string, rpc_init, last_completed_audit_period,
volume_get_active_by_window, notify_about_volume_usage,
snapshot_get_active_by_window, notify_about_snapshot_usage):
CONF.set_override('send_actions', True)
CONF.set_override('start_time', '2014-01-01 01:00:00')
CONF.set_override('end_time', '2014-02-02 02:00:00')
begin = datetime.datetime(2014, 1, 1, 1, 0)
end = datetime.datetime(2014, 2, 2, 2, 0)
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
last_completed_audit_period.return_value = (begin, end)
volume1_created = datetime.datetime(2014, 1, 1, 2, 0)
volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
volume1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=volume1_created,
deleted_at=volume1_deleted)
volume_get_active_by_window.return_value = [volume1]
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
extra_info_volume_create = {
'audit_period_beginning': str(volume1.created_at),
'audit_period_ending': str(volume1.created_at),
}
extra_info_volume_delete = {
'audit_period_beginning': str(volume1.deleted_at),
'audit_period_ending': str(volume1.deleted_at),
}
snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0)
snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0)
snapshot1 = mock.MagicMock(id='1', project_id='fake-project',
created_at=snapshot1_created,
deleted_at=snapshot1_deleted)
snapshot_get_active_by_window.return_value = [snapshot1]
extra_info_snapshot_create = {
'audit_period_beginning': str(snapshot1.created_at),
'audit_period_ending': str(snapshot1.created_at),
}
extra_info_snapshot_delete = {
'audit_period_beginning': str(snapshot1.deleted_at),
'audit_period_ending': str(snapshot1.deleted_at),
}
volume_usage_audit.main()
get_admin_context.assert_called_once_with()
self.assertEqual(CONF.project, 'cinder')
self.assertEqual(CONF.version, version.version_string())
log_setup.assert_called_once_with("cinder")
get_logger.assert_called_once_with('cinder')
rpc_init.assert_called_once_with(CONF)
last_completed_audit_period.assert_called_once_with()
volume_get_active_by_window.assert_called_once_with(ctxt, begin, end)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'exists', extra_usage_info=extra_info)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.start',
extra_usage_info=extra_info_volume_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'create.end',
extra_usage_info=extra_info_volume_create)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.start',
extra_usage_info=extra_info_volume_delete)
notify_about_volume_usage.assert_any_call(
ctxt, volume1, 'delete.end',
extra_usage_info=extra_info_volume_delete)
notify_about_snapshot_usage.assert_any_call(ctxt, snapshot1,
'exists', extra_info)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.start',
extra_usage_info=extra_info_snapshot_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'create.end',
extra_usage_info=extra_info_snapshot_create)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.start',
extra_usage_info=extra_info_snapshot_delete)
notify_about_snapshot_usage.assert_any_call(
ctxt, snapshot1, 'delete.end',
extra_usage_info=extra_info_snapshot_delete)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GceClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute.cluster_resolver import GceClusterResolver
from tensorflow.python.distribute.cluster_resolver import UnionClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
mock = test.mock
class GceClusterResolverTest(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def standard_mock_instance_groups(self, instance_map=None):
if instance_map is None:
instance_map = [
{'instance': 'https://gce.example.com/res/gce-instance-1'}
]
mock_instance_group_request = mock.MagicMock()
mock_instance_group_request.execute.return_value = {
'items': instance_map
}
service_attrs = {
'listInstances.return_value': mock_instance_group_request,
'listInstances_next.return_value': None,
}
mock_instance_groups = mock.Mock(**service_attrs)
return mock_instance_groups
def standard_mock_instances(self, instance_to_ip_map=None):
if instance_to_ip_map is None:
instance_to_ip_map = {
'gce-instance-1': '10.123.45.67'
}
mock_get_request = mock.MagicMock()
mock_get_request.execute.return_value = {
'networkInterfaces': [
{'networkIP': '10.123.45.67'}
]
}
def get_side_effect(project, zone, instance):
del project, zone # Unused
if instance in instance_to_ip_map:
mock_get_request = mock.MagicMock()
mock_get_request.execute.return_value = {
'networkInterfaces': [
{'networkIP': instance_to_ip_map[instance]}
]
}
return mock_get_request
else:
raise RuntimeError('Instance %s not found!' % instance)
service_attrs = {
'get.side_effect': get_side_effect,
}
mock_instances = mock.MagicMock(**service_attrs)
return mock_instances
def standard_mock_service_client(
self,
mock_instance_groups=None,
mock_instances=None):
if mock_instance_groups is None:
mock_instance_groups = self.standard_mock_instance_groups()
if mock_instances is None:
mock_instances = self.standard_mock_instances()
mock_client = mock.MagicMock()
mock_client.instanceGroups.return_value = mock_instance_groups
mock_client.instances.return_value = mock_instances
return mock_client
def gen_standard_mock_service_client(self, instances=None):
name_to_ip = {}
instance_list = []
for instance in instances:
name_to_ip[instance['name']] = instance['ip']
instance_list.append({
'instance': 'https://gce.example.com/gce/res/' + instance['name']
})
mock_instance = self.standard_mock_instances(name_to_ip)
mock_instance_group = self.standard_mock_instance_groups(instance_list)
return self.standard_mock_service_client(mock_instance_group, mock_instance)
def testSimpleSuccessfulRetrieval(self):
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
port=8470,
credentials=None,
service=self.standard_mock_service_client())
actual_cluster_spec = gce_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: '10.123.45.67:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testMasterRetrieval(self):
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_index=0,
port=8470,
credentials=None,
service=self.standard_mock_service_client())
self.assertEqual(gce_cluster_resolver.master(), 'grpc://10.123.45.67:8470')
def testMasterRetrievalWithCustomTasks(self):
name_to_ip = [
{'name': 'instance1', 'ip': '10.1.2.3'},
{'name': 'instance2', 'ip': '10.2.3.4'},
{'name': 'instance3', 'ip': '10.3.4.5'},
]
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(name_to_ip))
self.assertEqual(
gce_cluster_resolver.master('worker', 2, 'test'),
'test://10.3.4.5:8470')
def testOverrideParameters(self):
name_to_ip = [
{'name': 'instance1', 'ip': '10.1.2.3'},
{'name': 'instance2', 'ip': '10.2.3.4'},
{'name': 'instance3', 'ip': '10.3.4.5'},
]
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='testworker',
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(name_to_ip))
gce_cluster_resolver.task_index = 1
gce_cluster_resolver.rpc_layer = 'test'
self.assertEqual(gce_cluster_resolver.task_type, 'testworker')
self.assertEqual(gce_cluster_resolver.task_index, 1)
self.assertEqual(gce_cluster_resolver.rpc_layer, 'test')
self.assertEqual(gce_cluster_resolver.master(), 'test://10.2.3.4:8470')
def testOverrideParametersWithZeroOrEmpty(self):
name_to_ip = [
{'name': 'instance1', 'ip': '10.1.2.3'},
{'name': 'instance2', 'ip': '10.2.3.4'},
{'name': 'instance3', 'ip': '10.3.4.5'},
]
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='',
task_index=1,
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(name_to_ip))
self.assertEqual(gce_cluster_resolver.master(
task_type='', task_index=0), 'grpc://10.1.2.3:8470')
def testCustomJobNameAndPortRetrieval(self):
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='custom',
port=2222,
credentials=None,
service=self.standard_mock_service_client())
actual_cluster_spec = gce_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'custom' tasks { key: 0 value: '10.123.45.67:2222' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testMultipleInstancesRetrieval(self):
name_to_ip = [
{'name': 'instance1', 'ip': '10.1.2.3'},
{'name': 'instance2', 'ip': '10.2.3.4'},
{'name': 'instance3', 'ip': '10.3.4.5'},
]
gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(name_to_ip))
actual_cluster_spec = gce_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' }
tasks { key: 1 value: '10.2.3.4:8470' }
tasks { key: 2 value: '10.3.4.5:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testUnionMultipleInstanceRetrieval(self):
worker1_name_to_ip = [
{'name': 'instance1', 'ip': '10.1.2.3'},
{'name': 'instance2', 'ip': '10.2.3.4'},
{'name': 'instance3', 'ip': '10.3.4.5'},
]
worker2_name_to_ip = [
{'name': 'instance4', 'ip': '10.4.5.6'},
{'name': 'instance5', 'ip': '10.5.6.7'},
{'name': 'instance6', 'ip': '10.6.7.8'},
]
ps_name_to_ip = [
{'name': 'ps1', 'ip': '10.100.1.2'},
{'name': 'ps2', 'ip': '10.100.2.3'},
]
worker1_gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='worker',
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(worker1_name_to_ip))
worker2_gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='worker',
port=8470,
credentials=None,
service=self.gen_standard_mock_service_client(worker2_name_to_ip))
ps_gce_cluster_resolver = GceClusterResolver(
project='test-project',
zone='us-east1-d',
instance_group='test-instance-group',
task_type='ps',
port=2222,
credentials=None,
service=self.gen_standard_mock_service_client(ps_name_to_ip))
union_cluster_resolver = UnionClusterResolver(worker1_gce_cluster_resolver,
worker2_gce_cluster_resolver,
ps_gce_cluster_resolver)
actual_cluster_spec = union_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { key: 0 value: '10.100.1.2:2222' }
tasks { key: 1 value: '10.100.2.3:2222' } }
job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' }
tasks { key: 1 value: '10.2.3.4:8470' }
tasks { key: 2 value: '10.3.4.5:8470' }
tasks { key: 3 value: '10.4.5.6:8470' }
tasks { key: 4 value: '10.5.6.7:8470' }
tasks { key: 5 value: '10.6.7.8:8470' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
"""
:class:`ExcelTableDirective` implements the ``exceltable`` -directive.
"""
# Supporting only python >= 2.6
from __future__ import unicode_literals
from __future__ import print_function
from future.builtins import str as text
from future.builtins import (range, object)
from past.builtins import basestring
__docformat__ = 'restructuredtext'
__author__ = 'Juha Mustonen'
import os
import sys
import doctest
import re
import types
import logging
from datetime import datetime
# Import required docutils modules
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.tables import ListTable
from docutils import io, nodes, statemachine, utils, frontend
from docutils.utils import SystemMessagePropagation, Reporter
import sphinx
# Uses excellent module xlrd for reading Excel sheets
# Retrieve it from http://www.python-excel.org/
import xlrd
class Messenger(Reporter):
def __init__(self, src='sphinxcontrib.xyz'):
settings = frontend.OptionParser().get_default_values()
settings.report_level = 1
Reporter.__init__(self,
src,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler
)
self.log = logging.getLogger(src)
def debug(self, *msgs):
#return super(Messenger, self).debug(msg)
pass
def info(self, *msgs):
#return super(Messenger, self).info(msg)
pass
    def warning(self, *msgs):
        #super(Messenger, self).warning(msg)
        return nodes.literal_block(text=self._prepare(*msgs))
    def error(self, *msgs):
        #super(Messenger, self).error(msg)
        message = self._prepare(*msgs)
        #self.log.error(message)
        return nodes.literal_block(text=message)
    def _prepare(self, *msgs):
        # Join all message parts into a single unicode string.
        return u' '.join([text(msg) for msg in msgs])
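# --- Illustrative sketch (assumption, not part of the original module) -----
# Messenger.warning()/error() return ``nodes.literal_block`` instances, so a
# directive can hand them straight back as its result nodes. The helper name
# and the message text below are hypothetical.
def _example_report_error(msgr, file_path):
    # ``msgr`` is assumed to be a Messenger instance.
    return [msgr.error(u'Could not read file:', file_path)]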
class DirectiveTemplate(Directive):
"""
Template intended for directive development, providing
few handy functions
"""
def _get_directive_path(self, path):
"""
Returns transformed path from the directive
option/content
"""
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = os.path.normpath(os.path.join(source_dir, path))
return utils.relative_path(None, path)
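    # Illustrative note (hypothetical paths): for a directive used in
    # ``docs/chapter/index.rst`` with an option value ``data/report.xls``,
    # the helper above resolves the option relative to that document, i.e.
    # to ``docs/chapter/data/report.xls``, returned as a relative path.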
class ExcelTableDirective(ListTable, DirectiveTemplate):
"""
ExcelTableDirective implements the directive.
Directive allows to create RST tables from the contents
of the Excel sheet. The functionality is very similar to
csv-table (docutils) and xmltable (:mod:`sphinxcontrib.xmltable`).
Example of the directive:
.. code-block:: rest
.. exceltable::
:file: path/to/document.xls
:header: 1
"""
#required_arguments = 0
#optional_arguments = 0
has_content = False
option_spec = {
'file': directives.path,
'selection': directives.unchanged_required,
'encoding': directives.unchanged,
'header': directives.unchanged,
'sheet': directives.unchanged,
'class': directives.class_option,
'widths': directives.unchanged,
}
def run(self):
"""
Implements the directive
"""
# Get content and options
file_path = self.options.get('file', None)
selection = self.options.get('selection', 'A1:')
sheet = self.options.get('sheet', '0')
header = self.options.get('header', '0')
col_widths = self.options.get('widths', None)
# Divide the selection into from and to values
if u':' not in selection:
selection += u':'
fromcell, tocell = selection.split(u':')
if not fromcell:
fromcell = u'A1'
if not tocell:
tocell = None
#print selection, fromcell, tocell
        if not file_path:
            return [msgr.error(u"The 'file' option is missing")]
# Header option
header_rows = 0
if header and header.isdigit():
header_rows = int(header)
# Transform the path suitable for processing
file_path = self._get_directive_path(file_path)
print(u'file path: {0}'.format(file_path))
#try:
et = ExcelTable(open(file_path))
table = et.create_table(fromcell=fromcell, tocell=tocell,
nheader=header_rows, sheet=sheet)
#except Exception as e:
#raise e.with_traceback()
#return [msgr.error(u'Error occured while creating table: %s' % e)]
#pass
#print table
title, messages = self.make_title()
#node = nodes.Element() # anonymous container for parsing
#self.state.nested_parse(self.content, self.content_offset, node)
# If empty table is created
        if not table:
            return [msgr.error(u'The table generated from the selection is empty')]
try:
table_data = []
            # If the header option is not a number, it is expected to be a
            # comma-separated string of column names: use it as a single
            # header row. build_table_from_list handles the header markup.
            if header and not header.isdigit():
header_rows = 1
table_data.append([nodes.paragraph(text=hcell.strip()) for hcell in header.split(',')])
# Put the given data in rst elements: paragraph
for row in table['headers']:
table_data.append([nodes.paragraph(text=cell['value']) for cell in row])
# Iterates rows: put the given data in rst elements
for row in table['rows']:
row_data = []
for cell in row:
class_data = ['']
# Node based on formatting rules
                    # NOTE: rst does not support nested inline markup;
                    # use class attributes instead
if cell['italic']:
class_data.append('italic')
if cell['bold']:
node = nodes.strong(text=cell['value'])
else:
node = nodes.paragraph(text=cell['value'])
# Add additional formatting as class attributes
node['classes'] = class_data
row_data.append([node])
# FIXME: style attribute does not get into writer
if cell['bgcolor']:
rgb = [text(val) for val in cell['bgcolor']]
node.attributes['style'] = 'background-color: rgb(%s);' % ','.join(rgb)
#print node
table_data.append(row_data)
# If there is no data at this point, throw an error
if not table_data:
return [msgr.error('Selection did not return any data')]
# Get params from data
num_cols = len(table_data[0])
# Get the widths for the columns:
# 1. Use provided info, if available
# 2. Use widths from the excelsheet
# 3. Use default widths (equal to all)
#
# Get content widths from the first row of the table
# if it fails, calculate default column widths
if col_widths:
col_widths = [int(width) for width in col_widths.split(',')]
else:
col_widths = [int(col['width']) for col in table['rows'][0]]
col_width_total = sum(col_widths)
col_widths = [int(width * 100/ col_width_total) for width in col_widths]
# If still empty for some reason, use default widths
if not col_widths:
col_widths = self.get_column_widths(num_cols)
stub_columns = 0
            # Sanity checks
            # Different number of cells in the first and second row
            # (possibly a header row and one data row)
if type(header) is not int:
if len(table_data) > 1 and len(table_data[0]) != len(table_data[1]):
error = msgr.error('Data amount mismatch: check the directive data and params')
return [error]
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation as detail:
return [detail.args[0]]
# Generate the table node from the given list of elements
table_node = self.build_table_from_list(
table_data, col_widths, header_rows, stub_columns)
# Optional class parameter
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
#print table_node
return [table_node] + messages
# TODO: Move away
msgr = Messenger('sphinxcontrib.exceltable')
class ExcelTable(object):
"""
Class generates the list based table from
the given excel-document, suitable for the directive.
Class also implements the custom query format,
is to use for the directive.::
>>> import os
>>> from sphinxcontrib import exceltable
>>>
>>> fo = open(os.path.join(os.path.dirname(exceltable.__file__),'../doc/example/cartoons.xls'), 'r+b')
>>> et = exceltable.ExcelTable(fo)
>>>
>>> table = et.create_table(fromcell='A1', tocell='C4')
>>> assert et.fromcell == (0, 0)
>>> assert et.tocell == (2,3)
>>>
>>> table = et.create_table(fromcell='B10', tocell='B11', sheet='big')
>>> assert et.fromcell == (1,9)
>>> assert et.tocell == (1,10)
"""
def __init__(self, fobj, encoding='utf-8'):
"""
"""
#msgr.error('Testing: {0}'.format(fobj))
#assert type(fobj) is file, u'File object type expected, {0} given'.format(type(fobj))
self.file_object = fobj
self.fromcell = (0, 0)
self.tocell = (0, 0)
# xlrd uses paths only
# TODO: Add support for remote files
self.book = xlrd.open_workbook(self.file_object.name,
encoding_override=encoding,
formatting_info=True)
def create_table(self, fromcell=None, tocell=None, nheader=0, sheet=0):
"""
        Creates a table (as a list) based on the given cell selection.
        fromcell:
          The index of the cell where the selection begins. Defaults to
          the beginning of the data set (0, 0).
        tocell:
          The index of the cell where the selection ends. Defaults to
          the end of the data set.
        nheader:
          Number of rows that are treated as header rows. Normally the
          value is 0 (default) or 1.
        sheet:
          Name or index of the sheet, given as a string. Indexing starts
          from 0, which is also the default. A numeric index is given as
          a string, for example::
            et.create_table(fromcell='A1', tocell='B2', sheet='2')
"""
rows = []
# Select sheet by given index or name
if type(sheet) is int or sheet.isdigit():
sh1 = self.book.sheet_by_index(int(sheet))
else:
sh1 = self.book.sheet_by_name(sheet)
# Name selection, like: 'A1' or 'AB12'
if isinstance(fromcell, basestring):
match = re.match(r'(?P<chars>[A-Z]+)(?P<nums>[1-9]+[0-9]*)', fromcell)
if match:
parts = (match.group('chars'), int(match.group('nums')))
fromcell = toindex(*parts)
else:
fromcell = tuple([int(num) for num in fromcell.split(u',')])
# Name selection, like: 'A1' or 'AB12'
if isinstance(tocell, basestring):
match = re.match(r'(?P<chars>[A-Z]+)(?P<nums>[1-9]+[0-9]*)', tocell)
if match:
parts = (match.group('chars'), int(match.group('nums')))
tocell = toindex(*parts)
else:
tocell = tuple([int(num) for num in tocell.split(u',')])
if not fromcell:
fromcell = (0, 0)
# If ending cell is not given, calculate
# it from rows and cols
#print sh1.ncols, sh1.nrows
#print (tocell[0] > (sh1.ncols -1)) or (tocell[1] > (sh1.nrows -1))
maxrow_index = sh1.nrows -1
maxcol_index = sh1.ncols -1
if not tocell:
tocell = (maxcol_index, maxrow_index)
        # Clamp the column index to the sheet maximum
        if int(tocell[0]) > maxcol_index:
            tocell = (maxcol_index, tocell[1])
        # Clamp the row index to the sheet maximum
        if int(tocell[1]) > maxrow_index:
            tocell = (tocell[0], maxrow_index)
        # Iterate rows
rows = {'headers': [], 'rows': []}
widths = []
for rnum in range(fromcell[1], tocell[1]+1):
            # Iterate columns within the row
cols = []
for cnum in range(fromcell[0], tocell[0]+1):
cell = sh1.cell(rnum, cnum)
width = sh1.computed_column_width(cnum)
# Put data
cell_data = {'type': 'row', 'width': width, 'value': self._get_value(cell)}
# If header row
if rnum < nheader:
cell_data['type'] = 'header'
# Get more format info for the cell
cell_data.update(self._get_formatting(cell))
cols.append(cell_data)
            # If the first cell of the row is a header cell,
            # treat the whole row as a header row
if cols[0]['type'] == 'header':
rows['headers'].append(cols)
else:
rows['rows'].append(cols)
#widths_together = sum([cell['width'] for cols in rows])
#print widths_together
#widths = [round(val * 100.0 / widths_together) for val in widths]
# Store into object for validation purposes
self.fromcell = fromcell
self.tocell = tocell
return rows
def _get_value(self, cell):
"""
Returns the value of the xlrd Cell, based
on type.
"""
value = None
# String
if cell.ctype == xlrd.XL_CELL_TEXT:
return text(cell.value)
# Number: integer or float
if cell.ctype == xlrd.XL_CELL_NUMBER:
# There is no separation between integers
# and other numbers. Show it as integer if
# it seems like a one.
# NOTE: float.is_integer is available only in python 2.6 and above
if int(cell.value) == cell.value:
return u'%s' % int(cell.value)
return u'%s' % cell.value
# Date type
if cell.ctype == xlrd.XL_CELL_DATE:
value = xlrd.xldate_as_tuple(cell.value, 0)
date = datetime(
year=value[0],
month=value[1],
day=value[2],
hour=value[3],
minute=value[4],
second=value[5],
)
# Show more accurate value only if it exists
if not value[1]:
return u'%s' % value[0]
elif value[3] and value[4] and value[5]:
return text(date)
else:
# TODO: provide a way to define this
return text(date.strftime(u'%Y-%m-%d'))
# Boolean
if cell.ctype == xlrd.XL_CELL_BOOLEAN:
if cell.value:
return _(u'True')
return _(u'False')
# Error
if cell.ctype == xlrd.XL_CELL_ERROR:
return _(u'Error')
return u''
def _get_formatting(self, cell):
"""
Returns some format related information
about the given cell. The information is
required/handy when creating the table
cell:
Cell object where to get formatting for
Returns:
dictionary containing the formatting information
"""
format = {'bold':False, 'italic':False, 'bgcolor':None}
xf = self.book.xf_list[cell.xf_index]
font = self.book.font_list[xf.font_index]
# Weight: 400 (normal), 700 (bold)
if font.weight > 400:
format['bold'] = True
# Collect italic info
if font.italic:
format['italic'] = True
# Get bg color
bgcolor = self.book.colour_map[xf.background.background_colour_index]
if bgcolor:
format['bgcolor'] = bgcolor
return format
def toindex(col, row):
"""
Calculates the index number from
the Excel column name. Examples:
>>> from sphinxcontrib import exceltable
>>> exceltable.toindex('A', 1)
(0, 0)
>>> exceltable.toindex('B', 10)
(1, 9)
>>> exceltable.toindex('Z', 2)
(25, 1)
>>> exceltable.toindex('AA', 27)
(26, 26)
>>> exceltable.toindex('AB', 1)
(27, 0)
    .. NOTE::
       Following the naming in Excel/OOCalc,
       the row 'index' starts from 1, not from 0.
    """
    # Bijective base-26 conversion: A=1 .. Z=26, AA=27, AB=28, ...
    a2z = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    total = 0
    for char in col:
        total = total * 26 + (a2z.find(char) + 1)
    return total - 1, row - 1
def toname(colx, rowy):
"""
Opposite to `toindex`
"""
colname = xlrd.colname(colx)
return colname, rowy+1
def setup(app):
"""
Extension setup, called by Sphinx
"""
    # Sphinx 0.5 support (older add_directive signature)
    if sphinx.__version__.startswith('0.5'):
        app.add_directive('exceltable', ExcelTableDirective, 0, (0, 0, 0))
else:
app.add_directive('exceltable', ExcelTableDirective)
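# Enabling the extension in a Sphinx project is a matter of listing the
# module in conf.py, for example (a sketch; the dotted path follows the
# sphinxcontrib namespace used elsewhere in this module):
#
#   extensions = ['sphinxcontrib.exceltable']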
if __name__ == '__main__':
    doctest.testmod()
|
|
# -*- coding: utf-8 -*-
"""The SleuthKit (TSK) file entry implementation."""
import pytsk3
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import tsk_path_spec
from dfvfs.vfs import file_entry
from dfvfs.vfs import vfs_stat
class TSKDirectory(file_entry.Directory):
"""Class that implements a directory object using pytsk3."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
    Since a directory can contain a vast number of entries, using
    a generator is more memory efficient.
Yields:
A path specification (instance of path.TSKPathSpec).
"""
# Opening a file by inode number is faster than opening a file
# by location.
inode = getattr(self.path_spec, u'inode', None)
location = getattr(self.path_spec, u'location', None)
fs_info = self._file_system.GetFsInfo()
if inode is not None:
tsk_directory = fs_info.open_dir(inode=inode)
elif location is not None:
tsk_directory = fs_info.open_dir(path=location)
else:
return
for tsk_directory_entry in tsk_directory:
      # Note that because pytsk3.Directory does not explicitly define info
      # we need to check if the attribute exists and has a value other
      # than None.
if getattr(tsk_directory_entry, u'info', None) is None:
continue
      # Note that because pytsk3.TSK_FS_FILE does not explicitly define
      # fs_info we need to check if the attribute exists and has a value
      # other than None.
if getattr(tsk_directory_entry.info, u'fs_info', None) is None:
continue
      # Note that because pytsk3.TSK_FS_FILE does not explicitly define meta
      # we need to check if the attribute exists and has a value other
      # than None.
if getattr(tsk_directory_entry.info, u'meta', None) is None:
# Most directory entries will have an "inode" but not all, e.g.
# previously deleted files. Currently directory entries without
# a pytsk3.TSK_FS_META object are ignored.
continue
      # Note that because pytsk3.TSK_FS_META does not explicitly define addr
      # we need to check if the attribute exists.
if not hasattr(tsk_directory_entry.info.meta, u'addr'):
continue
directory_entry_inode = tsk_directory_entry.info.meta.addr
directory_entry = None
# Ignore references to self.
if directory_entry_inode == inode:
continue
# TODO: need better file system support.
# On non-NTFS file systems ignore inode 0.
ftype = tsk_directory_entry.info.fs_info.ftype
if directory_entry_inode == 0 and ftype not in [
pytsk3.TSK_FS_TYPE_NTFS, pytsk3.TSK_FS_TYPE_NTFS_DETECT]:
continue
      # Note that because pytsk3.TSK_FS_FILE does not explicitly define name
      # we need to check if the attribute exists and has a value other
      # than None.
if getattr(tsk_directory_entry.info, u'name', None) is not None:
directory_entry = getattr(tsk_directory_entry.info.name, u'name', u'')
# pytsk3 returns a UTF-8 encoded byte string.
try:
directory_entry = directory_entry.decode(u'utf8')
except UnicodeError:
# Continue here since we cannot represent the directory entry.
continue
if directory_entry:
# Ignore references to self or parent.
if directory_entry in [u'.', u'..']:
continue
if location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield tsk_path_spec.TSKPathSpec(
inode=directory_entry_inode, location=directory_entry,
parent=self.path_spec.parent)
class TSKFileEntry(file_entry.FileEntry):
"""Class that implements a file entry object using pytsk3."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_TSK
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False, tsk_file=None, parent_inode=None):
"""Initializes the file entry object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
file_system: the file system object (instance of vfs.FileSystem).
path_spec: the path specification (instance of path.PathSpec).
is_root: optional boolean value to indicate if the file entry is
the root file entry of the corresponding file system.
The default is False.
is_virtual: optional boolean value to indicate if the file entry is
a virtual file entry emulated by the corresponding file
system. The default is False.
tsk_file: optional file object (instance of pytsk3.File).
The default is None.
parent_inode: optional parent inode number. The default is None.
"""
super(TSKFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._link = None
self._name = None
self._parent_inode = parent_inode
self._tsk_file = tsk_file
def _GetDirectory(self):
"""Retrieves the directory object (instance of TSKDirectory)."""
if self._stat_object is None:
self._stat_object = self._GetStat()
if (self._stat_object and
self._stat_object.type == self._stat_object.TYPE_DIRECTORY):
return TSKDirectory(self._file_system, self.path_spec)
return
def _GetStat(self):
"""Retrieves the stat object.
Returns:
The stat object (instance of vfs.VFSStat).
Raises:
BackEndError: when the tsk File .info or .info.meta attribute is missing.
"""
tsk_file = self.GetTSKFile()
if not tsk_file or not tsk_file.info or not tsk_file.info.meta:
raise errors.BackEndError(u'Missing tsk File .info or .info.meta.')
stat_object = vfs_stat.VFSStat()
# File data stat information.
stat_object.size = getattr(tsk_file.info.meta, u'size', None)
# Date and time stat information.
stat_object.atime = getattr(tsk_file.info.meta, u'atime', None)
stat_object.atime_nano = getattr(
tsk_file.info.meta, u'atime_nano', None)
stat_object.bkup_time = getattr(
tsk_file.info.meta, u'bkup_time', None)
stat_object.bkup_time_nano = getattr(
tsk_file.info.meta, u'bkup_time_nano', None)
stat_object.ctime = getattr(tsk_file.info.meta, u'ctime', None)
stat_object.ctime_nano = getattr(
tsk_file.info.meta, u'ctime_nano', None)
stat_object.crtime = getattr(tsk_file.info.meta, u'crtime', None)
stat_object.crtime_nano = getattr(
tsk_file.info.meta, u'crtime_nano', None)
stat_object.dtime = getattr(tsk_file.info.meta, u'dtime', None)
stat_object.dtime_nano = getattr(
tsk_file.info.meta, u'dtime_nano', None)
stat_object.mtime = getattr(tsk_file.info.meta, u'mtime', None)
stat_object.mtime_nano = getattr(
tsk_file.info.meta, u'mtime_nano', None)
# Ownership and permissions stat information.
stat_object.mode = getattr(tsk_file.info.meta, u'mode', None)
stat_object.uid = getattr(tsk_file.info.meta, u'uid', None)
stat_object.gid = getattr(tsk_file.info.meta, u'gid', None)
# File entry type stat information.
# The type is an instance of pytsk3.TSK_FS_META_TYPE_ENUM.
tsk_fs_meta_type = getattr(
tsk_file.info.meta, u'type', pytsk3.TSK_FS_META_TYPE_UNDEF)
if tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG:
stat_object.type = stat_object.TYPE_FILE
elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_DIR:
stat_object.type = stat_object.TYPE_DIRECTORY
elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_LNK:
stat_object.type = stat_object.TYPE_LINK
elif (tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_CHR or
tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_BLK):
stat_object.type = stat_object.TYPE_DEVICE
elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_FIFO:
stat_object.type = stat_object.TYPE_PIPE
elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_SOCK:
stat_object.type = stat_object.TYPE_SOCKET
# TODO: implement support for:
# pytsk3.TSK_FS_META_TYPE_UNDEF
# pytsk3.TSK_FS_META_TYPE_SHAD
# pytsk3.TSK_FS_META_TYPE_WHT
# pytsk3.TSK_FS_META_TYPE_VIRT
# Other stat information.
stat_object.ino = getattr(tsk_file.info.meta, u'addr', None)
# stat_object.dev = stat_info.st_dev
# stat_object.nlink = getattr(tsk_file.info.meta, u'nlink', None)
# stat_object.fs_type = u'Unknown'
flags = getattr(tsk_file.info.meta, u'flags', 0)
# The flags are an instance of pytsk3.TSK_FS_META_FLAG_ENUM.
if int(flags) & pytsk3.TSK_FS_META_FLAG_ALLOC:
stat_object.is_allocated = True
else:
stat_object.is_allocated = False
return stat_object
@property
def link(self):
"""The full path of the linked file entry."""
if self._link is None:
self._link = u''
if not self.IsLink():
return self._link
tsk_file = self.GetTSKFile()
      # Note that because pytsk3.File does not explicitly define info
      # we need to check if the attribute exists and has a value other
      # than None.
if getattr(tsk_file, u'info', None) is None:
return self._link
      # If pytsk3.FS_Info.open() was used, file.info has an attribute meta
      # (pytsk3.TSK_FS_META) that contains the link.
if getattr(tsk_file.info, u'meta', None) is None:
return self._link
link = getattr(tsk_file.info.meta, u'link', None)
if link is None:
return self._link
# pytsk3 returns a UTF-8 encoded byte string without a leading
# path segment separator.
try:
link = u'{0:s}{1:s}'.format(
self._file_system.PATH_SEPARATOR, link.decode(u'utf8'))
except UnicodeError:
raise errors.BackEndError(
u'pytsk3 returned a non UTF-8 formatted link.')
self._link = link
return self._link
@property
def name(self):
"""The name of the file entry, which does not include the full path.
Raises:
BackEndError: when the pytsk3 returns a non UTF-8 formatted name.
"""
if self._name is None:
tsk_file = self.GetTSKFile()
      # Note that because pytsk3.File does not explicitly define info
      # we need to check if the attribute exists and has a value other
      # than None.
if getattr(tsk_file, u'info', None) is None:
return
      # If pytsk3.FS_Info.open() was used, file.info has an attribute name
      # (pytsk3.TSK_FS_FILE) that contains the name string. Otherwise the
      # name from the path specification is used.
if getattr(tsk_file.info, u'name', None) is not None:
name = getattr(tsk_file.info.name, u'name', None)
# pytsk3 returns a UTF-8 encoded byte string.
try:
self._name = name.decode(u'utf8')
except UnicodeError:
raise errors.BackEndError(
u'pytsk3 returned a non UTF-8 formatted name.')
else:
location = getattr(self.path_spec, u'location', None)
if location:
self._name = self._file_system.BasenamePath(location)
return self._name
@property
def sub_file_entries(self):
"""The sub file entries (generator of instance of vfs.FileEntry)."""
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory:
for path_spec in self._directory.entries:
yield TSKFileEntry(self._resolver_context, self._file_system, path_spec)
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link."""
if not self.link:
return
# TODO: is there a way to determine the link inode number here?
link_inode = None
parent_path_spec = getattr(self.path_spec, u'parent', None)
path_spec = tsk_path_spec.TSKPathSpec(
location=self.link, parent=parent_path_spec)
root_inode = self._file_system.GetRootInode()
if (self.link == self._file_system.LOCATION_ROOT or
(link_inode is not None and root_inode is not None and
link_inode == root_inode)):
is_root = True
else:
is_root = False
return TSKFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
def GetParentFileEntry(self):
"""Retrieves the parent file entry."""
location = getattr(self.path_spec, u'location', None)
if location is None:
return
parent_inode = self._parent_inode
parent_location = self._file_system.DirnamePath(location)
if parent_inode is None and parent_location is None:
return
if parent_location == u'':
parent_location = self._file_system.PATH_SEPARATOR
root_inode = self._file_system.GetRootInode()
if (parent_location == self._file_system.LOCATION_ROOT or
(parent_inode is not None and root_inode is not None and
parent_inode == root_inode)):
is_root = True
else:
is_root = False
parent_path_spec = getattr(self.path_spec, u'parent', None)
path_spec = tsk_path_spec.TSKPathSpec(
inode=parent_inode, location=parent_location, parent=parent_path_spec)
return TSKFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
def GetTSKFile(self):
"""Retrieves the SleuthKit file object (instance of pytsk3.File).
Raises:
PathSpecError: when the path specification is missing inode and location.
"""
if not self._tsk_file:
# Opening a file by inode number is faster than opening a file
# by location.
inode = getattr(self.path_spec, u'inode', None)
location = getattr(self.path_spec, u'location', None)
fs_info = self._file_system.GetFsInfo()
if inode is not None:
self._tsk_file = fs_info.open_meta(inode=inode)
elif location is not None:
self._tsk_file = fs_info.open(location)
else:
raise errors.PathSpecError(
u'Path specification missing inode and location.')
return self._tsk_file
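# Usage sketch for obtaining a TSKFileEntry through the dfvfs path
# specification resolver (the image path is hypothetical and the snippet
# assumes the raw image starts directly with a file system pytsk3 can parse):
#
#   from dfvfs.lib import definitions
#   from dfvfs.path import factory as path_spec_factory
#   from dfvfs.resolver import resolver
#
#   os_path_spec = path_spec_factory.Factory.NewPathSpec(
#       definitions.TYPE_INDICATOR_OS, location=u'/tmp/image.raw')
#   path_spec = path_spec_factory.Factory.NewPathSpec(
#       definitions.TYPE_INDICATOR_TSK, location=u'/', parent=os_path_spec)
#   file_entry = resolver.Resolver.OpenFileEntry(path_spec)
#   for sub_file_entry in file_entry.sub_file_entries:
#       print(sub_file_entry.name)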
|
|
from __future__ import absolute_import
from .consts import *
from .utils import *
from six.moves import map
from six.moves import range
class _dumb_repr(object):
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.__dict__)
class SWFRawTag(_dumb_repr):
def __init__(self, s=None):
if not s is None:
self.parse(s)
def parse(self, s):
pos = s.tell()
self.header = s.readtag_header()
self.pos_content = s.tell()
s.f.seek(pos)
#self.bytes = s.f.read(self.header.tag_length())
#s.f.seek(self.pos_content)
class SWFStraightEdge(_dumb_repr):
def __init__(self, start, to, line_style_idx, fill_style_idx):
self.start = start
self.to = to
self.line_style_idx = line_style_idx
self.fill_style_idx = fill_style_idx
def reverse_with_new_fillstyle(self, new_fill_idx):
return SWFStraightEdge(self.to, self.start, self.line_style_idx, new_fill_idx)
class SWFCurvedEdge(SWFStraightEdge):
def __init__(self, start, control, to, line_style_idx, fill_style_idx):
super(SWFCurvedEdge, self).__init__(start, to, line_style_idx, fill_style_idx)
self.control = control
def reverse_with_new_fillstyle(self, new_fill_idx):
return SWFCurvedEdge(self.to, self.control, self.start, self.line_style_idx, new_fill_idx)
class SWFShape(_dumb_repr):
def __init__(self, data=None, level=1, unit_divisor=20.0):
self._records = []
self._fillStyles = []
self._lineStyles = []
self._postLineStyles = {}
self._edgeMapsCreated = False
self.unit_divisor = unit_divisor
self.fill_edge_maps = []
self.line_edge_maps = []
self.current_fill_edge_map = {}
self.current_line_edge_map = {}
self.num_groups = 0
self.coord_map = {}
if not data is None:
self.parse(data, level)
def get_dependencies(self):
s = set()
for x in self._fillStyles:
s.update(x.get_dependencies())
for x in self._lineStyles:
s.update(x.get_dependencies())
return s
def parse(self, data, level=1):
data.reset_bits_pending()
fillbits = data.readUB(4)
linebits = data.readUB(4)
self.read_shape_records(data, fillbits, linebits, level)
def export(self, handler=None):
self._create_edge_maps()
if handler is None:
from .export import SVGShapeExporter
handler = SVGShapeExporter()
handler.begin_shape()
for i in range(0, self.num_groups):
self._export_fill_path(handler, i)
self._export_line_path(handler, i)
handler.end_shape()
return handler
@property
def records(self):
return self._records
def read_shape_records(self, data, fill_bits, line_bits, level=1):
shape_record = None
record_id = 0
while type(shape_record) != SWFShapeRecordEnd:
# The SWF10 spec says that shape records are byte aligned.
# In reality they seem not to be?
# bitsPending = 0;
edge_record = (data.readUB(1) == 1)
if edge_record:
straight_flag = (data.readUB(1) == 1)
num_bits = data.readUB(4) + 2
if straight_flag:
shape_record = data.readSTRAIGHTEDGERECORD(num_bits)
else:
shape_record = data.readCURVEDEDGERECORD(num_bits)
else:
states= data.readUB(5)
if states == 0:
shape_record = SWFShapeRecordEnd()
else:
style_change_record = data.readSTYLECHANGERECORD(states, fill_bits, line_bits, level)
if style_change_record.state_new_styles:
fill_bits = style_change_record.num_fillbits
line_bits = style_change_record.num_linebits
shape_record = style_change_record
shape_record.record_id = record_id
self._records.append(shape_record)
record_id += 1
#print shape_record.tostring()
def _create_edge_maps(self):
if self._edgeMapsCreated:
return
xPos = 0
yPos = 0
sub_path = []
fs_offset = 0
ls_offset = 0
curr_fs_idx0 = 0
curr_fs_idx1 = 0
curr_ls_idx = 0
self.fill_edge_maps = []
self.line_edge_maps = []
self.current_fill_edge_map = {}
self.current_line_edge_map = {}
self.num_groups = 0
for i in range(0, len(self._records)):
rec = self._records[i]
if rec.type == SWFShapeRecord.TYPE_STYLECHANGE:
if rec.state_line_style or rec.state_fill_style0 or rec.state_fill_style1:
if len(sub_path):
self._process_sub_path(sub_path, curr_ls_idx, curr_fs_idx0, curr_fs_idx1, rec.record_id)
sub_path = []
if rec.state_new_styles:
fs_offset = len(self._fillStyles)
ls_offset = len(self._lineStyles)
self._append_to(self._fillStyles, rec.fill_styles)
self._append_to(self._lineStyles, rec.line_styles)
if rec.state_line_style and rec.state_fill_style0 and rec.state_fill_style1 and \
rec.line_style == 0 and rec.fill_style0 == 0 and rec.fill_style1 == 0:
# new group (probably)
self._clean_edge_map(self.current_fill_edge_map)
self._clean_edge_map(self.current_line_edge_map)
self.fill_edge_maps.append(self.current_fill_edge_map)
self.line_edge_maps.append(self.current_line_edge_map)
self.current_fill_edge_map = {}
self.current_line_edge_map = {}
self.num_groups += 1
curr_fs_idx0 = 0
curr_fs_idx1 = 0
curr_ls_idx = 0
else:
if rec.state_line_style:
curr_ls_idx = rec.line_style
if curr_ls_idx > 0:
curr_ls_idx += ls_offset
if rec.state_fill_style0:
curr_fs_idx0 = rec.fill_style0
if curr_fs_idx0 > 0:
curr_fs_idx0 += fs_offset
if rec.state_fill_style1:
curr_fs_idx1 = rec.fill_style1
if curr_fs_idx1 > 0:
curr_fs_idx1 += fs_offset
if rec.state_moveto:
xPos = rec.move_deltaX
yPos = rec.move_deltaY
elif rec.type == SWFShapeRecord.TYPE_STRAIGHTEDGE:
start = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
if rec.general_line_flag:
xPos += rec.deltaX
yPos += rec.deltaY
else:
if rec.vert_line_flag:
yPos += rec.deltaY
else:
xPos += rec.deltaX
to = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
sub_path.append(SWFStraightEdge(start, to, curr_ls_idx, curr_fs_idx1))
elif rec.type == SWFShapeRecord.TYPE_CURVEDEDGE:
start = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
xPosControl = xPos + rec.control_deltaX
yPosControl = yPos + rec.control_deltaY
xPos = xPosControl + rec.anchor_deltaX
yPos = yPosControl + rec.anchor_deltaY
control = [xPosControl, yPosControl]
to = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
sub_path.append(SWFCurvedEdge(start, control, to, curr_ls_idx, curr_fs_idx1))
elif rec.type == SWFShapeRecord.TYPE_END:
# We're done. Process the last subpath, if any
if len(sub_path) > 0:
self._process_sub_path(sub_path, curr_ls_idx, curr_fs_idx0, curr_fs_idx1, rec.record_id)
self._clean_edge_map(self.current_fill_edge_map)
self._clean_edge_map(self.current_line_edge_map)
self.fill_edge_maps.append(self.current_fill_edge_map)
self.line_edge_maps.append(self.current_line_edge_map)
self.current_fill_edge_map = {}
self.current_line_edge_map = {}
self.num_groups += 1
curr_fs_idx0 = 0
curr_fs_idx1 = 0
curr_ls_idx = 0
self._edgeMapsCreated = True
def _process_sub_path(self, sub_path, linestyle_idx, fillstyle_idx0, fillstyle_idx1, record_id=-1):
path = None
if fillstyle_idx0 != 0:
if not fillstyle_idx0 in self.current_fill_edge_map:
path = self.current_fill_edge_map[fillstyle_idx0] = []
else:
path = self.current_fill_edge_map[fillstyle_idx0]
for j in range(len(sub_path) - 1, -1, -1):
path.append(sub_path[j].reverse_with_new_fillstyle(fillstyle_idx0))
if fillstyle_idx1 != 0:
if not fillstyle_idx1 in self.current_fill_edge_map:
path = self.current_fill_edge_map[fillstyle_idx1] = []
else:
path = self.current_fill_edge_map[fillstyle_idx1]
self._append_to(path, sub_path)
if linestyle_idx != 0:
if not linestyle_idx in self.current_line_edge_map:
path = self.current_line_edge_map[linestyle_idx] = []
else:
path = self.current_line_edge_map[linestyle_idx]
self._append_to(path, sub_path)
def _clean_edge_map(self, edge_map):
for style_idx in edge_map:
sub_path = edge_map[style_idx] if style_idx in edge_map else None
if sub_path is not None and len(sub_path) > 0:
tmp_path = []
prev_edge = None
self._create_coord_map(sub_path)
while len(sub_path) > 0:
idx = 0
while idx < len(sub_path):
if prev_edge is None or self._equal_point(prev_edge.to, sub_path[idx].start):
edge = sub_path[idx]
del sub_path[idx]
tmp_path.append(edge)
self._remove_edge_from_coord_map(edge)
prev_edge = edge
else:
edge = self._find_next_edge_in_coord_map(prev_edge)
if not edge is None:
idx = sub_path.index(edge)
else:
idx = 0
prev_edge = None
edge_map[style_idx] = tmp_path
def _equal_point(self, a, b, tol=0.001):
return (a[0] > b[0]-tol and a[0] < b[0]+tol and a[1] > b[1]-tol and a[1] < b[1]+tol)
def _find_next_edge_in_coord_map(self, edge):
key = "%0.4f_%0.4f" % (edge.to[0], edge.to[1])
if key in self.coord_map and len(self.coord_map[key]) > 0:
return self.coord_map[key][0]
else:
return None
def _create_coord_map(self, path):
self.coord_map = {}
for i in range(0, len(path)):
start = path[i].start
key = "%0.4f_%0.4f" % (start[0], start[1])
coord_map_array = self.coord_map[key] if key in self.coord_map else None
if coord_map_array is None:
self.coord_map[key] = [path[i]]
else:
self.coord_map[key].append(path[i])
def _remove_edge_from_coord_map(self, edge):
key = "%0.4f_%0.4f" % (edge.start[0], edge.start[1])
if key in self.coord_map:
coord_map_array = self.coord_map[key]
if len(coord_map_array) == 1:
del self.coord_map[key]
else:
                try:
                    idx = coord_map_array.index(edge)
                    del coord_map_array[idx]
                except ValueError:
                    # The edge was not (or no longer) in the list for this key.
                    pass
def _create_path_from_edge_map(self, edge_map):
new_path = []
style_ids = []
for style_id in edge_map:
style_ids.append(int(style_id))
style_ids = sorted(style_ids)
for i in range(0, len(style_ids)):
self._append_to(new_path, edge_map[style_ids[i]])
return new_path
def _export_fill_path(self, handler, group_index):
path = self._create_path_from_edge_map(self.fill_edge_maps[group_index])
pos = [100000000, 100000000]
u = 1.0 / self.unit_divisor
fill_style_idx = 10000000
if len(path) < 1:
return
handler.begin_fills()
for i in range(0, len(path)):
e = path[i]
if fill_style_idx != e.fill_style_idx:
fill_style_idx = e.fill_style_idx
pos = [100000000, 100000000]
try:
fill_style = self._fillStyles[fill_style_idx - 1] if fill_style_idx > 0 else None
if fill_style.type == 0x0:
# solid fill
handler.begin_fill(
ColorUtils.rgb(fill_style.rgb),
ColorUtils.alpha(fill_style.rgb))
elif fill_style.type in [0x10, 0x12, 0x13]:
# gradient fill
colors = []
ratios = []
alphas = []
for j in range(0, len(fill_style.gradient.records)):
gr = fill_style.gradient.records[j]
colors.append(ColorUtils.rgb(gr.color))
ratios.append(gr.ratio)
alphas.append(ColorUtils.alpha(gr.color))
handler.begin_gradient_fill(
GradientType.LINEAR if fill_style.type == 0x10 else GradientType.RADIAL,
colors, alphas, ratios,
fill_style.gradient_matrix,
fill_style.gradient.spreadmethod,
fill_style.gradient.interpolation_mode,
fill_style.gradient.focal_point
)
elif fill_style.type in [0x40, 0x41, 0x42, 0x43]:
# bitmap fill
handler.begin_bitmap_fill(
fill_style.bitmap_id,
fill_style.bitmap_matrix,
(fill_style.type == 0x40 or fill_style.type == 0x42),
(fill_style.type == 0x40 or fill_style.type == 0x41)
)
pass
            except (AttributeError, IndexError):
# Font shapes define no fillstyles per se, but do reference fillstyle index 1,
# which represents the font color. We just report solid black in this case.
handler.begin_fill(0)
if not self._equal_point(pos, e.start):
handler.move_to(e.start[0] * u, e.start[1] * u)
if type(e) is SWFCurvedEdge:
handler.curve_to(e.control[0] * u, e.control[1] * u, e.to[0] * u, e.to[1] * u)
else:
handler.line_to(e.to[0] * u, e.to[1] * u)
pos = e.to
handler.end_fill()
handler.end_fills()
def _export_line_path(self, handler, group_index):
path = self._create_path_from_edge_map(self.line_edge_maps[group_index])
pos = [100000000, 100000000]
u = 1.0 / self.unit_divisor
line_style_idx = 10000000
line_style = None
if len(path) < 1:
return
handler.begin_lines()
for i in range(0, len(path)):
e = path[i]
if line_style_idx != e.line_style_idx:
line_style_idx = e.line_style_idx
pos = [100000000, 100000000]
try:
line_style = self._lineStyles[line_style_idx - 1]
                except IndexError:
line_style = None
if line_style is not None:
scale_mode = LineScaleMode.NORMAL
if line_style.no_hscale_flag and line_style.no_vscale_flag:
scale_mode = LineScaleMode.NONE
elif line_style.no_hscale_flag:
scale_mode = LineScaleMode.HORIZONTAL
                    elif line_style.no_vscale_flag:
                        scale_mode = LineScaleMode.VERTICAL
if not line_style.has_fill_flag:
handler.line_style(
line_style.width / 20.0,
ColorUtils.rgb(line_style.color),
ColorUtils.alpha(line_style.color),
line_style.pixelhinting_flag,
scale_mode,
line_style.start_caps_style,
line_style.end_caps_style,
line_style.joint_style,
line_style.miter_limit_factor)
else:
fill_style = line_style.fill_type
if fill_style.type in [0x10, 0x12, 0x13]:
# gradient fill
colors = []
ratios = []
alphas = []
for j in range(0, len(fill_style.gradient.records)):
gr = fill_style.gradient.records[j]
colors.append(ColorUtils.rgb(gr.color))
ratios.append(gr.ratio)
alphas.append(ColorUtils.alpha(gr.color))
handler.line_gradient_style(
line_style.width / 20.0,
line_style.pixelhinting_flag,
scale_mode,
line_style.start_caps_style,
line_style.end_caps_style,
line_style.joint_style,
line_style.miter_limit_factor,
GradientType.LINEAR if fill_style.type == 0x10 else GradientType.RADIAL,
colors, alphas, ratios,
fill_style.gradient_matrix,
fill_style.gradient.spreadmethod,
fill_style.gradient.interpolation_mode,
fill_style.gradient.focal_point
)
elif fill_style.type in [0x40, 0x41, 0x42]:
handler.line_bitmap_style(
line_style.width / 20.0,
line_style.pixelhinting_flag,
scale_mode,
line_style.start_caps_style,
line_style.end_caps_style,
line_style.joint_style,
line_style.miter_limit_factor,
fill_style.bitmap_id, fill_style.bitmap_matrix,
(fill_style.type == 0x40 or fill_style.type == 0x42),
(fill_style.type == 0x40 or fill_style.type == 0x41)
)
else:
# we should never get here
handler.line_style(0)
if not self._equal_point(pos, e.start):
handler.move_to(e.start[0] * u, e.start[1] * u)
if type(e) is SWFCurvedEdge:
handler.curve_to(e.control[0] * u, e.control[1] * u, e.to[0] * u, e.to[1] * u)
else:
handler.line_to(e.to[0] * u, e.to[1] * u)
pos = e.to
handler.end_lines()
def _append_to(self, v1, v2):
for i in range(0, len(v2)):
v1.append(v2[i])
def __str__(self):
return "[SWFShape]"
class SWFShapeWithStyle(SWFShape):
def __init__(self, data, level, unit_divisor):
self._initialFillStyles = []
self._initialLineStyles = []
super(SWFShapeWithStyle, self).__init__(data, level, unit_divisor)
def export(self, handler=None):
self._fillStyles.extend(self._initialFillStyles)
self._lineStyles.extend(self._initialLineStyles)
return super(SWFShapeWithStyle, self).export(handler)
def get_dependencies(self):
s = set()
for x in self._fillStyles + self._initialFillStyles:
s.update(x.get_dependencies())
for x in self._lineStyles + self._initialLineStyles:
s.update(x.get_dependencies())
return s
def parse(self, data, level=1):
data.reset_bits_pending()
num_fillstyles = self.readstyle_array_length(data, level)
for i in range(0, num_fillstyles):
self._initialFillStyles.append(data.readFILLSTYLE(level))
num_linestyles = self.readstyle_array_length(data, level)
for i in range(0, num_linestyles):
if level <= 3:
self._initialLineStyles.append(data.readLINESTYLE(level))
else:
self._initialLineStyles.append(data.readLINESTYLE2(level))
num_fillbits = data.readUB(4)
num_linebits = data.readUB(4)
data.reset_bits_pending()
self.read_shape_records(data, num_fillbits, num_linebits, level)
def readstyle_array_length(self, data, level=1):
length = data.readUI8()
if level >= 2 and length == 0xff:
length = data.readUI16()
return length
def __str__(self):
s = " FillStyles:\n" if len(self._fillStyles) > 0 else ""
for i in range(0, len(self._initialFillStyles)):
s += " %d:%s\n" % (i+1, self._initialFillStyles[i].__str__())
if len(self._initialLineStyles) > 0:
s += " LineStyles:\n"
for i in range(0, len(self._initialLineStyles)):
s += " %d:%s\n" % (i+1, self._initialLineStyles[i].__str__())
for record in self._records:
s += record.__str__() + '\n'
return s.rstrip() + super(SWFShapeWithStyle, self).__str__()
class SWFShapeRecord(_dumb_repr):
TYPE_UNKNOWN = 0
TYPE_END = 1
TYPE_STYLECHANGE = 2
TYPE_STRAIGHTEDGE = 3
TYPE_CURVEDEDGE = 4
record_id = -1
def __init__(self, data=None, level=1):
if not data is None:
self.parse(data, level)
@property
def is_edge_record(self):
return (self.type == SWFShapeRecord.TYPE_STRAIGHTEDGE or
self.type == SWFShapeRecord.TYPE_CURVEDEDGE)
def parse(self, data, level=1):
pass
@property
def type(self):
return SWFShapeRecord.TYPE_UNKNOWN
def __str__(self):
return " [SWFShapeRecord]"
class SWFShapeRecordStraightEdge(SWFShapeRecord):
def __init__(self, data, num_bits=0, level=1):
self.num_bits = num_bits
super(SWFShapeRecordStraightEdge, self).__init__(data, level)
def parse(self, data, level=1):
self.general_line_flag = (data.readUB(1) == 1)
self.vert_line_flag = False if self.general_line_flag else (data.readUB(1) == 1)
self.deltaX = data.readSB(self.num_bits) \
if self.general_line_flag or not self.vert_line_flag \
else 0.0
self.deltaY = data.readSB(self.num_bits) \
if self.general_line_flag or self.vert_line_flag \
else 0.0
@property
def type(self):
return SWFShapeRecord.TYPE_STRAIGHTEDGE
def __str__(self):
s = " [SWFShapeRecordStraightEdge]"
if self.general_line_flag:
s += " General: %d %d" % (self.deltaX, self.deltaY)
else:
if self.vert_line_flag:
s += " Vertical: %d" % self.deltaY
else:
s += " Horizontal: %d" % self.deltaX
return s
class SWFShapeRecordCurvedEdge(SWFShapeRecord):
def __init__(self, data, num_bits=0, level=1):
self.num_bits = num_bits
super(SWFShapeRecordCurvedEdge, self).__init__(data, level)
def parse(self, data, level=1):
self.control_deltaX = data.readSB(self.num_bits)
self.control_deltaY = data.readSB(self.num_bits)
self.anchor_deltaX = data.readSB(self.num_bits)
self.anchor_deltaY = data.readSB(self.num_bits)
@property
def type(self):
return SWFShapeRecord.TYPE_CURVEDEDGE
def __str__(self):
return " [SWFShapeRecordCurvedEdge]" + \
" ControlDelta: %d, %d" % (self.control_deltaX, self.control_deltaY) + \
" AnchorDelta: %d, %d" % (self.anchor_deltaX, self.anchor_deltaY)
class SWFShapeRecordStyleChange(SWFShapeRecord):
def __init__(self, data, states=0, fill_bits=0, line_bits=0, level=1):
self.fill_styles = []
self.line_styles = []
self.state_new_styles = ((states & 0x10) != 0)
self.state_line_style = ((states & 0x08) != 0)
self.state_fill_style1 = ((states & 0x4) != 0)
self.state_fill_style0 = ((states & 0x2) != 0)
self.state_moveto = ((states & 0x1) != 0)
self.num_fillbits = fill_bits
self.num_linebits = line_bits
self.move_deltaX = 0.0
self.move_deltaY = 0.0
self.fill_style0 = 0
self.fill_style1 = 0
self.line_style = 0
super(SWFShapeRecordStyleChange, self).__init__(data, level)
def parse(self, data, level=1):
if self.state_moveto:
movebits = data.readUB(5)
self.move_deltaX = data.readSB(movebits)
self.move_deltaY = data.readSB(movebits)
self.fill_style0 = data.readUB(self.num_fillbits) if self.state_fill_style0 else 0
self.fill_style1 = data.readUB(self.num_fillbits) if self.state_fill_style1 else 0
self.line_style = data.readUB(self.num_linebits) if self.state_line_style else 0
if self.state_new_styles:
data.reset_bits_pending();
num_fillstyles = self.readstyle_array_length(data, level)
for i in range(0, num_fillstyles):
self.fill_styles.append(data.readFILLSTYLE(level))
num_linestyles = self.readstyle_array_length(data, level)
for i in range(0, num_linestyles):
if level <= 3:
self.line_styles.append(data.readLINESTYLE(level))
else:
self.line_styles.append(data.readLINESTYLE2(level))
self.num_fillbits = data.readUB(4)
self.num_linebits = data.readUB(4)
@property
def type(self):
return SWFShapeRecord.TYPE_STYLECHANGE
def readstyle_array_length(self, data, level=1):
length = data.readUI8()
if level >= 2 and length == 0xff:
length = data.readUI16()
return length
def __str__(self):
return " [SWFShapeRecordStyleChange]" + \
" moveTo: %d %d" % (self.move_deltaX, self.move_deltaY) + \
" fs0: %d" % self.fill_style0 + \
" fs1: %d" % self.fill_style1 + \
" linestyle: %d" % self.line_style + \
" flags: %d %d %d" % (self.state_fill_style0, self.state_fill_style1, self.state_line_style)
class SWFShapeRecordEnd(SWFShapeRecord):
def __init__(self):
super(SWFShapeRecordEnd, self).__init__(None)
def parse(self, data, level=1):
pass
@property
def type(self):
return SWFShapeRecord.TYPE_END
def __str__(self):
return " [SWFShapeRecordEnd]"
class SWFMatrix(_dumb_repr):
def __init__(self, data):
self.scaleX = 1.0
self.scaleY = 1.0
self.rotateSkew0 = 0.0
self.rotateSkew1 = 0.0
self.translateX = 0.0
self.translateY = 0.0
if not data is None:
self.parse(data)
def parse(self, data):
data.reset_bits_pending();
self.scaleX = 1.0
self.scaleY = 1.0
if data.readUB(1) == 1:
scaleBits = data.readUB(5)
self.scaleX = data.readFB(scaleBits)
self.scaleY = data.readFB(scaleBits)
self.rotateSkew0 = 0.0
self.rotateSkew1 = 0.0
if data.readUB(1) == 1:
rotateBits = data.readUB(5)
self.rotateSkew0 = data.readFB(rotateBits)
self.rotateSkew1 = data.readFB(rotateBits)
translateBits = data.readUB(5)
self.translateX = data.readSB(translateBits)
self.translateY = data.readSB(translateBits)
def to_array(self):
return [
self.scaleX, self.rotateSkew0,
self.rotateSkew1, self.scaleY,
self.translateX, self.translateY
]
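    # The array order matches the SWF MATRIX record fields; applying the
    # matrix to a point (x, y) in twips follows the usual affine form
    # (a note on interpretation, not taken from the original source):
    #   x' = scaleX * x + rotateSkew1 * y + translateX
    #   y' = rotateSkew0 * x + scaleY * y + translateY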
def __str__(self):
def fmt(s):
return "%0.2f" % s
return "[%s]" % ",".join(map(fmt, self.to_array()))
class SWFGradientRecord(_dumb_repr):
def __init__(self, data=None, level=1):
self._records = []
if not data is None:
self.parse(data, level)
def parse(self, data, level=1):
self.ratio = data.readUI8()
self.color = data.readRGB() if level <= 2 else data.readRGBA()
def __str__(self):
return "[SWFGradientRecord] Color: %s, Ratio: %d" % (ColorUtils.to_rgb_string(self.color), self.ratio)
class SWFGradient(_dumb_repr):
def __init__(self, data=None, level=1):
self._records = []
self.focal_point = 0.0
if not data is None:
self.parse(data, level)
@property
def records(self):
return self._records
def parse(self, data, level=1):
data.reset_bits_pending();
self.spreadmethod = data.readUB(2)
self.interpolation_mode = data.readUB(2)
num_gradients = data.readUB(4)
for i in range(0, num_gradients):
self._records.append(data.readGRADIENTRECORD(level))
def __str__(self):
s = "[SWFGadient]"
for record in self._records:
s += "\n " + record.__str__()
return s
class SWFFocalGradient(SWFGradient):
def __init__(self, data=None, level=1):
super(SWFFocalGradient, self).__init__(data, level)
def parse(self, data, level=1):
super(SWFFocalGradient, self).parse(data, level)
self.focal_point = data.readFIXED8()
    def __str__(self):
        # The gradient itself has no color/ratio; those live on the records.
        s = "[SWFFocalGradient] FocalPoint: %0.2f" % self.focal_point
        for record in self._records:
            s += "\n  " + record.__str__()
        return s
class SWFFillStyle(_dumb_repr):
def __init__(self, data=None, level=1):
if not data is None:
self.parse(data, level)
COLOR = [0x0]
GRADIENT = [0x10, 0x12, 0x13]
BITMAP = [0x40, 0x41, 0x42, 0x43]
def parse(self, data, level=1):
self.type = data.readUI8()
if self.type in SWFFillStyle.COLOR:
self.rgb = data.readRGB() if level <= 2 else data.readRGBA()
elif self.type in SWFFillStyle.GRADIENT:
self.gradient_matrix = data.readMATRIX()
self.gradient = data.readFOCALGRADIENT(level) if self.type == 0x13 else data.readGRADIENT(level)
elif self.type in SWFFillStyle.BITMAP:
self.bitmap_id = data.readUI16()
self.bitmap_matrix = data.readMATRIX()
else:
raise Exception("Unknown fill style type: 0x%x" % self.type, level)
def get_dependencies(self):
return set([self.bitmap_id]) if self.type in SWFFillStyle.BITMAP else set()
def __str__(self):
s = "[SWFFillStyle] "
if self.type in SWFFillStyle.COLOR:
s += "Color: %s" % ColorUtils.to_rgb_string(self.rgb)
elif self.type in SWFFillStyle.GRADIENT:
s += "Gradient: %s" % self.gradient_matrix
elif self.type in SWFFillStyle.BITMAP:
s += "BitmapID: %d" % (self.bitmap_id)
return s
class SWFLineStyle(_dumb_repr):
def __init__(self, data=None, level=1):
# forward declarations for SWFLineStyle2
self.start_caps_style = LineCapsStyle.ROUND
self.end_caps_style = LineCapsStyle.ROUND
self.joint_style = LineJointStyle.ROUND
self.has_fill_flag = False
self.no_hscale_flag = False
self.no_vscale_flag = False
self.pixelhinting_flag = False
self.no_close = False
self.miter_limit_factor = 3.0
self.fill_type = None
self.width = 1
self.color = 0
if not data is None:
self.parse(data, level)
def get_dependencies(self):
return set()
def parse(self, data, level=1):
self.width = data.readUI16()
self.color = data.readRGB() if level <= 2 else data.readRGBA()
def __str__(self):
s = "[SWFLineStyle] "
s += "Color: %s, Width: %d" % (ColorUtils.to_rgb_string(self.color), self.width)
return s
class SWFLineStyle2(SWFLineStyle):
def __init__(self, data=None, level=1):
super(SWFLineStyle2, self).__init__(data, level)
def parse(self, data, level=1):
self.width = data.readUI16()
self.start_caps_style = data.readUB(2)
self.joint_style = data.readUB(2)
self.has_fill_flag = (data.readUB(1) == 1)
self.no_hscale_flag = (data.readUB(1) == 1)
self.no_vscale_flag = (data.readUB(1) == 1)
self.pixelhinting_flag = (data.readUB(1) == 1)
data.readUB(5)
self.no_close = (data.readUB(1) == 1)
self.end_caps_style = data.readUB(2)
if self.joint_style == LineJointStyle.MITER:
self.miter_limit_factor = data.readFIXED8()
if self.has_fill_flag:
self.fill_type = data.readFILLSTYLE(level)
else:
self.color = data.readRGBA()
def __str__(self):
s = "[SWFLineStyle2] "
s += "Width: %d, " % self.width
s += "StartCapsStyle: %d, " % self.start_caps_style
s += "JointStyle: %d, " % self.joint_style
s += "HasFillFlag: %d, " % self.has_fill_flag
s += "NoHscaleFlag: %d, " % self.no_hscale_flag
s += "NoVscaleFlag: %d, " % self.no_vscale_flag
s += "PixelhintingFlag: %d, " % self.pixelhinting_flag
s += "NoClose: %d, " % self.no_close
        if self.joint_style == LineJointStyle.MITER:
            s += "MiterLimitFactor: %d, " % self.miter_limit_factor
if self.has_fill_flag:
s += "FillType: %s, " % self.fill_type
else:
s += "Color: %s" % ColorUtils.to_rgb_string(self.color)
return s
class SWFMorphGradientRecord(_dumb_repr):
def __init__(self, data):
if not data is None:
self.parse(data)
def parse(self, data):
self.startRatio = data.readUI8()
self.startColor = data.readRGBA()
self.endRatio = data.readUI8()
self.endColor = data.readRGBA()
class SWFMorphGradient(_dumb_repr):
def __init__(self, data, level=1):
self.records = []
if not data is None:
self.parse(data, level)
def parse(self, data, level=1):
self.records = []
numGradients = data.readUI8()
for i in range(0, numGradients):
self.records.append(data.readMORPHGRADIENTRECORD())
class SWFMorphFillStyle(_dumb_repr):
def __init__(self, data, level=1):
if not data is None:
self.parse(data, level)
def get_dependencies(self):
return set([self.bitmapId]) if hasattr(self, 'bitmapId') else set()
def parse(self, data, level=1):
type = data.readUI8()
if type == 0x0:
self.startColor = data.readRGBA()
self.endColor = data.readRGBA()
elif type in [0x10, 0x12]:
self.startGradientMatrix = data.readMATRIX()
self.endGradientMatrix = data.readMATRIX()
self.gradient = data.readMORPHGRADIENT(level)
elif type in [0x40, 0x41, 0x42, 0x43]:
self.bitmapId = data.readUI16()
self.startBitmapMatrix = data.readMATRIX()
self.endBitmapMatrix = data.readMATRIX()
class SWFMorphLineStyle(_dumb_repr):
def __init__(self, data, level=1):
# Forward declaration of SWFMorphLineStyle2 properties
self.startCapsStyle = LineCapsStyle.ROUND
self.endCapsStyle = LineCapsStyle.ROUND
self.jointStyle = LineJointStyle.ROUND
self.hasFillFlag = False
self.noHScaleFlag = False
self.noVScaleFlag = False
self.pixelHintingFlag = False
self.noClose = False
self.miterLimitFactor = 3
self.fillType = None
if not data is None:
self.parse(data, level)
def parse(self, data, level=1):
self.startWidth = data.readUI16()
self.endWidth = data.readUI16()
self.startColor = data.readRGBA()
self.endColor = data.readRGBA()
class SWFMorphLineStyle2(SWFMorphLineStyle):
def __init__(self, data, level=1):
super(SWFMorphLineStyle2, self).__init__(data, level)
def parse(self, data, level=1):
self.startWidth = data.readUI16()
self.endWidth = data.readUI16()
self.startCapsStyle = data.readUB(2)
self.jointStyle = data.readUB(2)
self.hasFillFlag = (data.readUB(1) == 1)
self.noHScaleFlag = (data.readUB(1) == 1)
self.noVScaleFlag = (data.readUB(1) == 1)
self.pixelHintingFlag = (data.readUB(1) == 1)
reserved = data.readUB(5);
self.noClose = (data.readUB(1) == 1)
self.endCapsStyle = data.readUB(2)
if self.jointStyle == LineJointStyle.MITER:
self.miterLimitFactor = data.readFIXED8()
if self.hasFillFlag:
self.fillType = data.readMORPHFILLSTYLE(level)
else:
self.startColor = data.readRGBA()
self.endColor = data.readRGBA()
class SWFRecordHeader(_dumb_repr):
def __init__(self, type, content_length, header_length):
self.type = type
self.content_length = content_length
self.header_length = header_length
@property
def tag_length(self):
return self.header_length + self.content_length
class SWFRectangle(_dumb_repr):
def __init__(self):
self.xmin = self.xmax = self.ymin = self.ymax = 0
def parse(self, s):
s.reset_bits_pending()
bits = s.readUB(5)
self.xmin = s.readSB(bits)
self.xmax = s.readSB(bits)
self.ymin = s.readSB(bits)
self.ymax = s.readSB(bits)
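    # Note: the parsed values are in twips (1/20th of a pixel); __str__
    # below divides by 20 to report the rectangle in pixels.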
@property
def dimensions(self):
"""
Returns dimensions as (x, y) tuple.
"""
return (self.xmax - self.xmin, self.ymax - self.ymin)
def __str__(self):
return "[xmin: %d xmax: %d ymin: %d ymax: %d]" % (self.xmin/20, self.xmax/20, self.ymin/20, self.ymax/20)
class SWFColorTransform(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
data.reset_bits_pending()
self.hasAddTerms = (data.readUB(1) == 1)
self.hasMultTerms = (data.readUB(1) == 1)
bits = data.readUB(4)
self.rMult = 1
self.gMult = 1
self.bMult = 1
if self.hasMultTerms:
self.rMult = data.readSB(bits)
self.gMult = data.readSB(bits)
self.bMult = data.readSB(bits)
self.rAdd = 0
self.gAdd = 0
self.bAdd = 0
if self.hasAddTerms:
self.rAdd = data.readSB(bits)
self.gAdd = data.readSB(bits)
self.bAdd = data.readSB(bits)
@property
def matrix(self):
return [
self.rMult / 256.0, 0.0, 0.0, 0.0, self.rAdd / 256.0,
0.0, self.gMult / 256.0, 0.0, 0.0, self.gAdd / 256.0,
0.0, 0.0, self.bMult / 256.0, 0.0, self.bAdd / 256.0,
0.0, 0.0, 0.0, 1.0, 1.0
]
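    # The rows above map (R, G, B, A, 1) to the transformed channels with
    # values normalized to 0..1, e.g. R' = R * rMult / 256 + rAdd / 256
    # (an interpretation note; the layout itself is taken from the matrix above).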
def __str__(self):
return "[%d %d %d %d %d %d]" % \
(self.rMult, self.gMult, self.bMult, self.rAdd, self.gAdd, self.bAdd)
class SWFColorTransformWithAlpha(SWFColorTransform):
def __init__(self, data=None):
super(SWFColorTransformWithAlpha, self).__init__(data)
def parse(self, data):
data.reset_bits_pending()
self.hasAddTerms = (data.readUB(1) == 1)
self.hasMultTerms = (data.readUB(1) == 1)
bits = data.readUB(4)
self.rMult = 1
self.gMult = 1
self.bMult = 1
self.aMult = 1
if self.hasMultTerms:
self.rMult = data.readSB(bits)
self.gMult = data.readSB(bits)
self.bMult = data.readSB(bits)
self.aMult = data.readSB(bits)
self.rAdd = 0
self.gAdd = 0
self.bAdd = 0
self.aAdd = 0
if self.hasAddTerms:
self.rAdd = data.readSB(bits)
self.gAdd = data.readSB(bits)
self.bAdd = data.readSB(bits)
self.aAdd = data.readSB(bits)
@property
def matrix(self):
'''
Gets the matrix as a 20 item list
'''
return [
self.rMult / 256.0, 0.0, 0.0, 0.0, self.rAdd / 256.0,
0.0, self.gMult / 256.0, 0.0, 0.0, self.gAdd / 256.0,
0.0, 0.0, self.bMult / 256.0, 0.0, self.bAdd / 256.0,
0.0, 0.0, 0.0, self.aMult / 256.0, self.aAdd / 256.0
]
def __str__(self):
return "[%d %d %d %d %d %d %d %d]" % \
(self.rMult, self.gMult, self.bMult, self.aMult, self.rAdd, self.gAdd, self.bAdd, self.aAdd)
class SWFFrameLabel(_dumb_repr):
def __init__(self, frameNumber, name):
self.frameNumber = frameNumber
self.name = name
def __str__(self):
return "Frame: %d, Name: %s" % (self.frameNumber, self.name)
class SWFScene(_dumb_repr):
def __init__(self, offset, name):
self.offset = offset
self.name = name
def __str__(self):
return "Scene: %d, Name: '%s'" % (self.offset, self.name)
class SWFSymbol(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
self.tagId = data.readUI16()
self.name = data.readString()
def __str__(self):
return "ID %d, Name: %s" % (self.tagId, self.name)
class SWFGlyphEntry(_dumb_repr):
def __init__(self, data=None, glyphBits=0, advanceBits=0):
if not data is None:
self.parse(data, glyphBits, advanceBits)
def parse(self, data, glyphBits, advanceBits):
# GLYPHENTRYs are not byte aligned
self.index = data.readUB(glyphBits)
self.advance = data.readSB(advanceBits)
def __str__(self):
return "Index: %d, Advance: %d" % (self.index, self.advance)
class SWFKerningRecord(_dumb_repr):
def __init__(self, data=None, wideCodes=False):
if not data is None:
self.parse(data, wideCodes)
def parse(self, data, wideCodes):
self.code1 = data.readUI16() if wideCodes else data.readUI8()
self.code2 = data.readUI16() if wideCodes else data.readUI8()
self.adjustment = data.readSI16()
def __str__(self):
return "Code1: %d, Code2: %d, Adjustment: %d" % (self.code1, self.code2, self.adjustment)
class SWFTextRecord(_dumb_repr):
def __init__(self, data=None, glyphBits=0, advanceBits=0, previousRecord=None, level=1):
self.hasFont = False
self.hasColor = False
self.hasYOffset = False
self.hasXOffset = False
self.fontId = -1
self.textColor = 0
self.xOffset = 0
self.yOffset = 0
self.textHeight = 12
self.glyphEntries = []
if not data is None:
self.parse(data, glyphBits, advanceBits, previousRecord, level)
def get_dependencies(self):
return set([self.fontId]) if self.hasFont else set()
def parse(self, data, glyphBits, advanceBits, previousRecord=None, level=1):
self.glyphEntries = []
styles = data.readUI8()
self.type = styles >> 7
self.hasFont = ((styles & 0x08) != 0)
self.hasColor = ((styles & 0x04) != 0)
self.hasYOffset = ((styles & 0x02) != 0)
self.hasXOffset = ((styles & 0x01) != 0)
if self.hasFont:
self.fontId = data.readUI16()
elif not previousRecord is None:
self.fontId = previousRecord.fontId
if self.hasColor:
self.textColor = data.readRGB() if level < 2 else data.readRGBA()
elif not previousRecord is None:
self.textColor = previousRecord.textColor
if self.hasXOffset:
            self.xOffset = data.readSI16()
elif not previousRecord is None:
self.xOffset = previousRecord.xOffset
if self.hasYOffset:
            self.yOffset = data.readSI16()
elif not previousRecord is None:
self.yOffset = previousRecord.yOffset
if self.hasFont:
self.textHeight = data.readUI16()
elif not previousRecord is None:
self.textHeight = previousRecord.textHeight
glyphCount = data.readUI8()
for i in range(0, glyphCount):
self.glyphEntries.append(data.readGLYPHENTRY(glyphBits, advanceBits))
def __str__(self):
return "[SWFTextRecord]"
class SWFClipActions(_dumb_repr):
def __init__(self, data=None, version=0):
self.eventFlags = None
self.records = []
if not data is None:
self.parse(data, version)
def parse(self, data, version):
data.readUI16() # reserved, always 0
self.eventFlags = data.readCLIPEVENTFLAGS(version)
self.records = []
record = data.readCLIPACTIONRECORD(version)
while not record is None:
self.records.append(record)
record = data.readCLIPACTIONRECORD(version)
def __str__(self):
return "[SWFClipActions]"
class SWFClipActionRecord(_dumb_repr):
def __init__(self, data=None, version=0):
self.eventFlags = None
self.keyCode = 0
self.actions = []
if not data is None:
self.parse(data, version)
def parse(self, data, version):
self.actions = []
self.eventFlags = data.readCLIPEVENTFLAGS(version)
data.readUI32() # actionRecordSize, not needed here
if self.eventFlags.keyPressEvent:
self.keyCode = data.readUI8()
action = data.readACTIONRECORD()
while not action is None:
self.actions.append(action)
action = data.readACTIONRECORD()
def __str__(self):
return "[SWFClipActionRecord]"
class SWFClipEventFlags(_dumb_repr):
keyUpEvent = False
keyDownEvent = False
mouseUpEvent = False
mouseDownEvent = False
mouseMoveEvent = False
unloadEvent = False
enterFrameEvent = False
loadEvent = False
dragOverEvent = False # SWF6
rollOutEvent = False # SWF6
rollOverEvent = False # SWF6
releaseOutsideEvent = False # SWF6
releaseEvent = False # SWF6
pressEvent = False # SWF6
initializeEvent = False # SWF6
dataEvent = False
constructEvent = False # SWF7
keyPressEvent = False # SWF6
dragOutEvent = False # SWF6
def __init__(self, data=None, version=0):
if not data is None:
self.parse(data, version)
def parse(self, data, version):
        flags1 = data.readUI8()
self.keyUpEvent = ((flags1 & 0x80) != 0)
self.keyDownEvent = ((flags1 & 0x40) != 0)
self.mouseUpEvent = ((flags1 & 0x20) != 0)
self.mouseDownEvent = ((flags1 & 0x10) != 0)
self.mouseMoveEvent = ((flags1 & 0x08) != 0)
self.unloadEvent = ((flags1 & 0x04) != 0)
self.enterFrameEvent = ((flags1 & 0x02) != 0)
self.loadEvent = ((flags1 & 0x01) != 0)
flags2 = data.readUI8()
self.dragOverEvent = ((flags2 & 0x80) != 0)
self.rollOutEvent = ((flags2 & 0x40) != 0)
self.rollOverEvent = ((flags2 & 0x20) != 0)
self.releaseOutsideEvent = ((flags2 & 0x10) != 0)
self.releaseEvent = ((flags2 & 0x08) != 0)
self.pressEvent = ((flags2 & 0x04) != 0)
self.initializeEvent = ((flags2 & 0x02) != 0)
self.dataEvent = ((flags2 & 0x01) != 0)
if version >= 6:
flags3 = data.readUI8()
self.constructEvent = ((flags3 & 0x04) != 0)
self.keyPressEvent = ((flags3 & 0x02) != 0)
self.dragOutEvent = ((flags3 & 0x01) != 0)
data.readUI8() # reserved, always 0
def __str__(self):
return "[SWFClipEventFlags]"
class SWFZoneData(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
self.alignmentCoordinate = data.readFLOAT16()
self.zoneRange = data.readFLOAT16()
def __str__(self):
return "[SWFZoneData]"
class SWFZoneRecord(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
self.zoneData = []
numZoneData = data.readUI8()
for i in range(0, numZoneData):
self.zoneData.append(data.readZONEDATA())
mask = data.readUI8()
self.maskX = ((mask & 0x01) != 0)
self.maskY = ((mask & 0x02) != 0)
def __str__(self):
return "[SWFZoneRecord]"
class SWFSoundInfo(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
reserved = data.readUB(2)
assert reserved == 0
self.syncStop = data.readUB(1) == 1
self.syncNoMultiple = data.readUB(1) == 1
self.hasEnvelope = data.readUB(1) == 1
self.hasLoops = data.readUB(1) == 1
self.hasOutPoint = data.readUB(1) == 1
self.hasInPoint = data.readUB(1) == 1
self.inPoint = data.readUI32() if self.hasInPoint else None
self.outPoint = data.readUI32() if self.hasOutPoint else None
self.loopCount = data.readUI16() if self.hasLoops else None
self.envPointCount = data.readUI8() if self.hasEnvelope else None
self.envelopePoints = [data.readSOUNDENVELOPE() for x in range(self.envPointCount)] if self.hasEnvelope else None
def __str__(self):
return "[SWFSoundInfo]"
class SWFSoundEnvelope(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
self.position = data.readUI32()
self.leftLevel = data.readUI16()
self.rightLevel = data.readUI16()
def __str__(self):
return "[SWFSoundEnvelope]"
class SWFButtonRecord(_dumb_repr):
def __init__(self, version, data=None):
# version is 1 for DefineButton, 2 for DefineButton2, etc
if not data is None:
self.parse(data, version)
def get_dependencies(self):
return set([self.characterId]) if self.valid else set()
def parse(self, data, version):
reserved0 = data.readUB(2)
self.hasBlendMode = data.readUB(1) == 1
self.hasFilterList = data.readUB(1) == 1
self.stateHitTest = data.readUB(1) == 1
self.stateDown = data.readUB(1) == 1
self.stateOver = data.readUB(1) == 1
self.stateUp = data.readUB(1) == 1
self.valid = reserved0 or self.hasBlendMode or \
self.hasFilterList or self.stateHitTest or \
self.stateDown or self.stateOver or self.stateUp
if not self.valid:
return
self.characterId = data.readUI16()
self.placeDepth = data.readUI16()
self.placeMatrix = data.readMATRIX()
if version == 2:
self.colorTransform = data.readCXFORMWITHALPHA()
self.filterList = data.readFILTERLIST() if self.hasFilterList else None
self.blendMode = data.readUI8() if self.hasBlendMode else 0
def __str__(self):
return "[SWFButtonRecord]"
def __repr__(self):
return "[SWFButtonRecord %r]" % self.__dict__
class SWFButtonCondAction(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def parse(self, data):
self.idleToOverDown = data.readUB(1) == 1
self.outDownToIdle = data.readUB(1) == 1
self.outDownToOverDown = data.readUB(1) == 1
self.overDownToOutDown = data.readUB(1) == 1
self.overDownToOverUp = data.readUB(1) == 1
self.overUpToOverDown = data.readUB(1) == 1
self.overUpToIdle = data.readUB(1) == 1
self.idleToOverUp = data.readUB(1) == 1
self.keyPress = data.readUB(7)
self.overDownToIdle = data.readUB(1) == 1
self.actions = data.readACTIONRECORDs()
def __str__(self):
return "[SWFButtonCondAction]"
class SWFExport(_dumb_repr):
def __init__(self, data=None):
if not data is None:
self.parse(data)
def get_dependencies(self):
return set([self.characterId])
def parse(self, data):
self.characterId = data.readUI16()
self.characterName = data.readString()
def __str__(self):
return "[SWFExport %d as %r]" % (self.characterId, self.characterName)
|
|
from datetime import datetime, timedelta, time
from dateutil.parser import parser
from django.core.cache import cache
import json
from casexml.apps.case.models import CommCareCase
from corehq.apps.api.es import ReportXFormES, get_report_script_field
from corehq.util.dates import iso_string_to_datetime, iso_string_to_date
from dimagi.utils.parsing import json_format_date
from pact.enums import PACT_DOMAIN
from pact.lib.quicksect import IntervalNode
from pact.utils import get_patient_display_cache
import logging
cached_schedules = {}
def get_seconds(d):
import time
return time.mktime(d.utctimetuple())
class CHWPatientSchedule(object):
def __init__(self, username, intervaltrees, raw_schedule):
self.username = username
self.intervals = intervaltrees
self.raw_schedule = raw_schedule
def scheduled_for_date(self, date_val):
"""
For a given date, return the array of pact_ids that are scheduled for visiting. This will check the activate date by using the internal interval tree.
Parameter: datetime value
Returns: array of pact_ids
"""
day_of_week = date_val.isoweekday() % 7
        if day_of_week not in self.intervals:
            return []
        day_tree = self.intervals[day_of_week]
results = []
day_tree.intersect(get_seconds(date_val) - .1, get_seconds(date_val),
lambda x: results.append(x.other))
return results
@classmethod
def get_schedule(cls, chw_username, override_date=None):
"""
Generate schedule object for a given username
"""
        cached_schedules = None  # note: this local shadows the module-level cache, so the cached branch below never runs
        if override_date is None:
            nowdate = datetime.utcnow()
        else:
            nowdate = override_date
        day_intervaltree = {}
        if cached_schedules is None:
#no documents, then we need to load them up
db = CommCareCase.get_db()
chw_schedules = db.view('pact/chw_dot_schedules', key=chw_username).all()
to_cache = []
for item in chw_schedules:
single_sched = item['value']
to_cache.append(single_sched)
cache.set("%s_schedule" % (chw_username), json.dumps(to_cache), 3600)
cached_arr = to_cache
else:
cached_arr = json.loads(cached_schedules)
for single_sched in cached_arr:
day_of_week = int(single_sched['day_of_week'])
            if day_of_week in day_intervaltree:
daytree = day_intervaltree[day_of_week]
else:
#if there's no day of week indication for this, then it's just a null interval node. To start this node, we make it REALLY old.
daytree = IntervalNode(get_seconds(datetime.min),
get_seconds(nowdate + timedelta(days=10)))
            if single_sched['ended_date'] is None:
enddate = nowdate + timedelta(days=9)
else:
enddate = iso_string_to_datetime(single_sched['ended_date'])
startdate = iso_string_to_datetime(single_sched['active_date'])
case_id = single_sched['case_id']
            if 'error' in single_sched:
#this is a non-showstopping issue due to quirks with older submissions
logging.error("Error, no pactid: %s" % single_sched['error'])
daytree.insert(get_seconds(startdate), get_seconds(enddate), other=case_id)
day_intervaltree[day_of_week] = daytree
return cls(chw_username, day_intervaltree, cached_arr)
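# Illustrative usage sketch (not part of the original module): asking which
# patients a CHW is scheduled to see today. The username is hypothetical.
def _example_scheduled_today(chw_username='example.chw'):
    schedule = CHWPatientSchedule.get_schedule(chw_username)
    # scheduled_for_date() intersects the per-weekday interval tree with the
    # given datetime and returns the matching case ids.
    return schedule.scheduled_for_date(datetime.utcnow())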
def dots_submissions_by_case(case_id, query_date, username=None):
"""
Actually run query for username submissions
todo: do terms for the pact_ids instead of individual term?
"""
xform_es = ReportXFormES(PACT_DOMAIN)
script_fields = {
"doc_id": get_report_script_field('_id', is_known=True),
"pact_id": get_report_script_field("form.pact_id"),
"encounter_date": get_report_script_field('form.encounter_date'),
"username": get_report_script_field('form.meta.username', is_known=True),
"visit_type": get_report_script_field('form.visit_type'),
"visit_kept": get_report_script_field('form.visit_kept'),
"contact_type": get_report_script_field('form.contact_type'),
"observed_art": get_report_script_field('form.observed_art'),
"observed_non_art": get_report_script_field('form.observed_non_art'),
"observer_non_art_dose": get_report_script_field('form.observed_non_art_dose'),
"observed_art_dose": get_report_script_field('form.observed_art_dose'),
"pillbox_check": get_report_script_field('form.pillbox_check.check'),
"scheduled": get_report_script_field('form.scheduled'),
}
term_block = {'form.#type': 'dots_form'}
if username is not None:
term_block['form.meta.username'] = username
query = xform_es.by_case_id_query(PACT_DOMAIN, case_id, terms=term_block,
date_field='form.encounter_date.#value', startdate=query_date,
enddate=query_date)
query['sort'] = {'received_on': 'asc'}
query['script_fields'] = script_fields
query['size'] = 1
query['from'] = 0
res = xform_es.run_query(query)
print json.dumps(res, indent=2)
return res
def get_schedule_tally(username, total_interval, override_date=None):
"""
Main entry point
For a given username and interval, get a simple array of the username and scheduled visit (whether a submission is there or not) exists.
returns (schedule_tally_array, patient_array, total_scheduled (int), total_visited(int))
schedul_tally_array = [visit_date, [(patient1, visit1), (patient2, visit2), (patient3, None), (patient4, visit4), ...]]
where visit = XFormInstance
"""
if override_date is None:
nowdate = datetime.utcnow()
chw_schedule = CHWPatientSchedule.get_schedule(username)
else:
nowdate = override_date
chw_schedule = CHWPatientSchedule.get_schedule(username, override_date=nowdate)
patient_case_ids = set([x['case_id'] for x in chw_schedule.raw_schedule])
patient_cache = get_patient_display_cache(list(patient_case_ids))
#got the chw schedule
    #now let's walk through the date range, and get the scheduled CHWs per this date
    visit_dates = []
ret = [] #where it's going to be an array of tuples:
#(date, scheduled[], submissions[] - that line up with the scheduled)
total_scheduled = 0
total_visited = 0
for n in range(0, total_interval):
td = timedelta(days=n)
visit_date = nowdate - td
scheduled_case_ids = chw_schedule.scheduled_for_date(visit_date)
patient_case_ids = set(filter(lambda x: x is not None, scheduled_case_ids))
dereferenced_patient_info = [patient_cache.get(x, {}) for x in patient_case_ids]
visited = []
#inefficient, but we need to get the patients in alpha order
#patients = sorted(patients, key=lambda x: x.last_name)
dp = parser()
for case_id in patient_case_ids:
total_scheduled += 1
search_results = dots_submissions_by_case(case_id, visit_date, username=username)
submissions = search_results['hits']['hits']
if len(submissions) > 0:
#calculate if pillbox checked
pillbox_check_str = submissions[0]['fields']['pillbox_check']
if len(pillbox_check_str) > 0:
pillbox_check_data = json.loads(pillbox_check_str)
anchor_date = dp.parse(pillbox_check_data.get('anchor'))
else:
pillbox_check_data = {}
anchor_date = datetime.min
encounter_date = dp.parse(submissions[0]['fields']['encounter_date'])
submissions[0]['fields']['has_pillbox_check'] = 'Yes' if anchor_date.date() == encounter_date.date() else 'No'
visited.append(submissions[0]['fields'])
total_visited += 1
else:
#ok, so no submission from this chw, let's see if there's ANY from anyone on this day.
search_results = dots_submissions_by_case(case_id, visit_date)
other_submissions = search_results['hits']['hits']
if len(other_submissions) > 0:
visited.append(other_submissions[0]['fields'])
total_visited += 1
else:
visited.append(None)
ret.append((visit_date, zip(dereferenced_patient_info, visited)))
return ret, patient_case_ids, total_scheduled, total_visited
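# Illustrative sketch (not part of the original module): unpacking the tuple
# returned by get_schedule_tally(). The username and interval are hypothetical.
def _example_tally_summary(username='example.chw', days=7):
    date_arr, case_ids, total_scheduled, total_visited = get_schedule_tally(username, days)
    summary = []
    for visit_date, pairs in date_arr:
        pairs = list(pairs)  # (patient_display_dict, submission_fields or None) tuples
        kept = sum(1 for _patient, visit in pairs if visit is not None)
        summary.append((visit_date.date(), kept, len(pairs)))
    return summary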
def chw_calendar_submit_report(request, username, interval=7):
"""Calendar view of submissions by CHW, overlaid with their scheduled visits, and whether they made them or not."""
return_context = {}
return_context['username'] = username
total_interval = interval
if 'interval' in request.GET:
try:
total_interval = int(request.GET['interval'])
except ValueError:
pass
#secret date ranges
if 'enddate' in request.GET:
end_date_str = request.GET.get('enddate', json_format_date(datetime.utcnow()))
end_date = iso_string_to_date(end_date_str)
else:
end_date = datetime.utcnow().date()
if 'startdate' in request.GET:
#if there's a startdate, trump interval
start_date_str = request.GET.get('startdate', json_format_date(datetime.utcnow()))
start_date = iso_string_to_date(start_date_str)
total_interval = (end_date - start_date).days
ret, patients, total_scheduled, total_visited = get_schedule_tally(username,
total_interval,
override_date=end_date)
if len(ret) > 0:
return_context['date_arr'] = ret
return_context['total_scheduled'] = total_scheduled
return_context['total_visited'] = total_visited
return_context['start_date'] = ret[0][0]
return_context['end_date'] = ret[-1][0]
else:
return_context['total_scheduled'] = 0
return_context['total_visited'] = 0
return return_context
|
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage user accounts on a Google Compute Engine instances."""
import datetime
import json
import logging.handlers
import optparse
import random
from google_compute_engine import config_manager
from google_compute_engine import constants
from google_compute_engine import file_utils
from google_compute_engine import logger
from google_compute_engine import metadata_watcher
from google_compute_engine.accounts import accounts_utils
from google_compute_engine.accounts import oslogin_utils
LOCKFILE = constants.LOCALSTATEDIR + '/lock/google_accounts.lock'
class AccountsDaemon(object):
"""Manage user accounts based on changes to metadata."""
invalid_users = set()
user_ssh_keys = {}
def __init__(
self, groups=None, remove=False, gpasswd_add_cmd=None,
gpasswd_remove_cmd=None, groupadd_cmd=None, useradd_cmd=None,
userdel_cmd=None, usermod_cmd=None, debug=False):
"""Constructor.
Args:
groups: string, a comma separated list of groups.
remove: bool, True if deprovisioning a user should be destructive.
useradd_cmd: string, command to create a new user.
userdel_cmd: string, command to delete a user.
usermod_cmd: string, command to modify user's groups.
groupadd_cmd: string, command to add a new group.
      gpasswd_add_cmd: string, command to add a user to a group.
      gpasswd_remove_cmd: string, command to remove a user from a group.
debug: bool, True if debug output should write to the console.
"""
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='google-accounts', debug=debug, facility=facility)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self.utils = accounts_utils.AccountsUtils(
logger=self.logger, groups=groups, remove=remove,
gpasswd_add_cmd=gpasswd_add_cmd, gpasswd_remove_cmd=gpasswd_remove_cmd,
groupadd_cmd=groupadd_cmd, useradd_cmd=useradd_cmd,
userdel_cmd=userdel_cmd, usermod_cmd=usermod_cmd)
self.oslogin = oslogin_utils.OsLoginUtils(logger=self.logger)
try:
with file_utils.LockFile(LOCKFILE):
self.logger.info('Starting Google Accounts daemon.')
timeout = 60 + random.randint(0, 30)
self.watcher.WatchMetadata(
self.HandleAccounts, recursive=True, timeout=timeout)
except (IOError, OSError) as e:
self.logger.warning(str(e))
def _HasExpired(self, key):
"""Check whether an SSH key has expired.
Uses Google-specific semantics of the OpenSSH public key format's comment
field to determine if an SSH key is past its expiration timestamp, and
therefore no longer to be trusted. This format is still subject to change.
Reliance on it in any way is at your own risk.
Args:
key: string, a single public key entry in OpenSSH public key file format.
This will be checked for Google-specific comment semantics, and if
present, those will be analysed.
Returns:
bool, True if the key has Google-specific comment semantics and has an
expiration timestamp in the past, or False otherwise.
"""
self.logger.debug('Processing key: %s.', key)
try:
schema, json_str = key.split(None, 3)[2:]
except (ValueError, AttributeError):
self.logger.debug('No schema identifier. Not expiring key.')
return False
if schema != 'google-ssh':
self.logger.debug('Invalid schema %s. Not expiring key.', schema)
return False
try:
json_obj = json.loads(json_str)
except ValueError:
self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)
return False
if 'expireOn' not in json_obj:
self.logger.debug('No expiration timestamp. Not expiring key.')
return False
expire_str = json_obj['expireOn']
format_str = '%Y-%m-%dT%H:%M:%S+0000'
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
self.logger.warning(
'Expiration timestamp "%s" not in format %s. Not expiring key.',
expire_str, format_str)
return False
# Expire the key if and only if we have exceeded the expiration timestamp.
return datetime.datetime.utcnow() > expire_time
def _ParseAccountsData(self, account_data):
"""Parse the SSH key data into a user map.
Args:
account_data: string, the metadata server SSH key attributes data.
Returns:
      dict, a mapping of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
"""
if not account_data:
return {}
lines = [line for line in account_data.splitlines() if line]
user_map = {}
for line in lines:
if not all(ord(c) < 128 for c in line):
self.logger.info('SSH key contains non-ascii character: %s.', line)
continue
split_line = line.split(':', 1)
if len(split_line) != 2:
self.logger.info('SSH key is not a complete entry: %s.', split_line)
continue
user, key = split_line
if self._HasExpired(key):
self.logger.debug('Expired SSH key for user %s: %s.', user, key)
continue
if user not in user_map:
user_map[user] = []
user_map[user].append(key)
    self.logger.debug('User accounts: %s.', user_map)
return user_map
def _GetInstanceAndProjectAttributes(self, metadata_dict):
"""Get dictionaries for instance and project attributes.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
tuple, two dictionaries for instance and project attributes.
"""
metadata_dict = metadata_dict or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = {}
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = {}
self.logger.warning('Project attributes were not found.')
return instance_data, project_data
def _GetAccountsData(self, metadata_dict):
"""Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
      dict, a mapping of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
"""
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and not instance_data.get('sshKeys'):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys'))
accounts_data = '\n'.join([key for key in valid_keys if key])
return self._ParseAccountsData(accounts_data)
def _UpdateUsers(self, update_users):
"""Provision and update Linux user accounts based on account metadata.
Args:
update_users: dict, authorized users mapped to their public SSH keys.
"""
for user, ssh_keys in update_users.items():
if not user or user in self.invalid_users:
continue
configured_keys = self.user_ssh_keys.get(user, [])
if set(ssh_keys) != set(configured_keys):
if not self.utils.UpdateUser(user, ssh_keys):
self.invalid_users.add(user)
else:
self.user_ssh_keys[user] = ssh_keys[:]
def _RemoveUsers(self, remove_users):
"""Deprovision Linux user accounts that do not appear in account metadata.
Args:
remove_users: list, the username strings of the Linux accounts to remove.
"""
for username in remove_users:
self.utils.RemoveUser(username)
self.user_ssh_keys.pop(username, None)
self.invalid_users -= set(remove_users)
def _GetEnableOsLoginValue(self, metadata_dict):
"""Get the value of the enable-oslogin metadata key.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
bool, True if OS Login is enabled for VM access.
"""
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
instance_value = instance_data.get('enable-oslogin')
project_value = project_data.get('enable-oslogin')
value = instance_value or project_value or ''
return value.lower() == 'true'
def _GetEnableTwoFactorValue(self, metadata_dict):
"""Get the value of the enable-oslogin-2fa metadata key.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
bool, True if two factor authentication is enabled for VM access.
"""
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
instance_value = instance_data.get('enable-oslogin-2fa')
project_value = project_data.get('enable-oslogin-2fa')
value = instance_value or project_value or ''
return value.lower() == 'true'
def HandleAccounts(self, result):
"""Called when there are changes to the contents of the metadata server.
Args:
result: json, the deserialized contents of the metadata server.
"""
self.logger.debug('Checking for changes to user accounts.')
configured_users = self.utils.GetConfiguredUsers()
enable_oslogin = self._GetEnableOsLoginValue(result)
enable_two_factor = self._GetEnableTwoFactorValue(result)
if enable_oslogin:
desired_users = {}
self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)
else:
desired_users = self._GetAccountsData(result)
self.oslogin.UpdateOsLogin(False)
remove_users = sorted(set(configured_users) - set(desired_users.keys()))
self._UpdateUsers(desired_users)
self._RemoveUsers(remove_users)
self.utils.SetConfiguredUsers(desired_users.keys())
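# Illustrative example (not part of the daemon): the ssh-keys metadata consumed
# by _ParseAccountsData() is newline-separated "username:key" entries, and
# _HasExpired() recognises the Google-specific "google-ssh" JSON comment with
# an expireOn timestamp. The user names, key material and timestamp below are
# hypothetical.
_EXAMPLE_SSH_KEYS_METADATA = (
    'alice:ssh-rsa AAAAB3NzaC1yc2E...truncated... alice@example.com\n'
    'bob:ssh-rsa AAAAB3NzaC1yc2E...truncated... google-ssh '
    '{"userName":"bob@example.com","expireOn":"2018-01-01T00:00:00+0000"}'
)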
def main():
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug', action='store_true', dest='debug',
help='print debug output to the console.')
(options, _) = parser.parse_args()
instance_config = config_manager.ConfigManager()
if instance_config.GetOptionBool('Daemons', 'accounts_daemon'):
AccountsDaemon(
groups=instance_config.GetOptionString('Accounts', 'groups'),
remove=instance_config.GetOptionBool('Accounts', 'deprovision_remove'),
useradd_cmd=instance_config.GetOptionString('Accounts', 'useradd_cmd'),
userdel_cmd=instance_config.GetOptionString('Accounts', 'userdel_cmd'),
usermod_cmd=instance_config.GetOptionString('Accounts', 'usermod_cmd'),
groupadd_cmd=instance_config.GetOptionString(
'Accounts', 'groupadd_cmd'),
gpasswd_add_cmd=instance_config.GetOptionString('Accounts', 'gpasswd_add_cmd'),
gpasswd_remove_cmd=instance_config.GetOptionString('Accounts', 'gpasswd_remove_cmd'),
debug=bool(options.debug))
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import, print_function, division
import warnings
import six
from .headers import Headers
from .. import encoding, utils
CONTENT_MISSING = 0
if six.PY2: # pragma: nocover
_native = lambda x: x
_always_bytes = lambda x: x
else:
# While the HTTP head _should_ be ASCII, it's not uncommon for certain headers to be utf-8 encoded.
_native = lambda x: x.decode("utf-8", "surrogateescape")
_always_bytes = lambda x: utils.always_bytes(x, "utf-8", "surrogateescape")
class MessageData(utils.Serializable):
def __eq__(self, other):
if isinstance(other, MessageData):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def set_state(self, state):
for k, v in state.items():
if k == "headers":
v = Headers.from_state(v)
setattr(self, k, v)
def get_state(self):
state = vars(self).copy()
state["headers"] = state["headers"].get_state()
return state
@classmethod
def from_state(cls, state):
state["headers"] = Headers.from_state(state["headers"])
return cls(**state)
class Message(utils.Serializable):
def __init__(self, data):
self.data = data
def __eq__(self, other):
if isinstance(other, Message):
return self.data == other.data
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_state(self):
return self.data.get_state()
def set_state(self, state):
self.data.set_state(state)
@classmethod
def from_state(cls, state):
return cls(**state)
@property
def headers(self):
"""
Message headers object
Returns:
netlib.http.Headers
"""
return self.data.headers
@headers.setter
def headers(self, h):
self.data.headers = h
@property
def content(self):
"""
The raw (encoded) HTTP message body
See also: :py:attr:`text`
"""
return self.data.content
@content.setter
def content(self, content):
self.data.content = content
if isinstance(content, bytes):
self.headers["content-length"] = str(len(content))
@property
def http_version(self):
"""
Version string, e.g. "HTTP/1.1"
"""
return _native(self.data.http_version)
@http_version.setter
def http_version(self, http_version):
self.data.http_version = _always_bytes(http_version)
@property
def timestamp_start(self):
"""
First byte timestamp
"""
return self.data.timestamp_start
@timestamp_start.setter
def timestamp_start(self, timestamp_start):
self.data.timestamp_start = timestamp_start
@property
def timestamp_end(self):
"""
Last byte timestamp
"""
return self.data.timestamp_end
@timestamp_end.setter
def timestamp_end(self, timestamp_end):
self.data.timestamp_end = timestamp_end
@property
def text(self):
"""
The decoded HTTP message body.
Decoded contents are not cached, so accessing this attribute repeatedly is relatively expensive.
.. note::
This is not implemented yet.
See also: :py:attr:`content`, :py:class:`decoded`
"""
# This attribute should be called text, because that's what requests does.
raise NotImplementedError()
@text.setter
def text(self, text):
raise NotImplementedError()
def decode(self):
"""
Decodes body based on the current Content-Encoding header, then
removes the header. If there is no Content-Encoding header, no
action is taken.
Returns:
True, if decoding succeeded.
False, otherwise.
"""
ce = self.headers.get("content-encoding")
data = encoding.decode(ce, self.content)
if data is None:
return False
self.content = data
self.headers.pop("content-encoding", None)
return True
def encode(self, e):
"""
Encodes body with the encoding e, where e is "gzip", "deflate" or "identity".
Returns:
            True, if encoding succeeded.
False, otherwise.
"""
data = encoding.encode(e, self.content)
if data is None:
return False
self.content = data
self.headers["content-encoding"] = e
return True
# Legacy
@property
def body(self): # pragma: nocover
warnings.warn(".body is deprecated, use .content instead.", DeprecationWarning)
return self.content
@body.setter
def body(self, body): # pragma: nocover
warnings.warn(".body is deprecated, use .content instead.", DeprecationWarning)
self.content = body
class decoded(object):
"""
A context manager that decodes a request or response, and then
re-encodes it with the same encoding after execution of the block.
Example:
.. code-block:: python
with decoded(request):
request.content = request.content.replace("foo", "bar")
"""
def __init__(self, message):
self.message = message
ce = message.headers.get("content-encoding")
if ce in encoding.ENCODINGS:
self.ce = ce
else:
self.ce = None
def __enter__(self):
if self.ce:
self.message.decode()
def __exit__(self, type, value, tb):
if self.ce:
self.message.encode(self.ce)
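# Illustrative sketch (not part of netlib): decode()/encode() keep the body and
# the Content-Encoding header in sync; "gzip" is one of the encodings named in
# encode()'s docstring. "msg" is assumed to be a request or response built on
# Message.
def _force_gzip(msg):
    # per decode()'s docstring this is a no-op when there is no
    # Content-Encoding header on the message
    msg.decode()
    msg.encode("gzip")  # compresses the body and sets content-encoding: gzip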
|
|
'''
---
module: openshift_logging_facts
version_added: ""
short_description: Gather facts about the OpenShift logging stack
description:
- Determine the current facts about the OpenShift logging stack (e.g. cluster size)
options:
author: Red Hat, Inc
'''
import copy
import json
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from subprocess import * # noqa: F402,F403
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from ansible.module_utils.basic import * # noqa: F402,F403
import yaml
EXAMPLES = """
- action: openshift_logging_facts
"""
RETURN = """
"""
DEFAULT_OC_OPTIONS = ["-o", "json"]
# constants used for various labels and selectors
COMPONENT_KEY = "component"
LOGGING_INFRA_KEY = "logging-infra"
# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
# pylint: disable=line-too-long
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops", "mux", "eventrouter"]
class OCBaseCommand(object):
''' The base class used to query openshift '''
def __init__(self, binary, kubeconfig, namespace):
''' the init method of OCBaseCommand class '''
self.binary = binary
self.kubeconfig = kubeconfig
self.user = self.get_system_admin(self.kubeconfig)
self.namespace = namespace
# pylint: disable=no-self-use
def get_system_admin(self, kubeconfig):
''' Retrieves the system admin '''
with open(kubeconfig, 'r') as kubeconfig_file:
config = yaml.load(kubeconfig_file)
for user in config["users"]:
if user["name"].startswith("system:admin"):
return user["name"]
raise Exception("Unable to find system:admin in: " + kubeconfig)
# pylint: disable=too-many-arguments, dangerous-default-value
def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
''' Wrapper method for the "oc" command '''
cmd = [self.binary, sub, kind]
if name is not None:
cmd = cmd + [name]
if namespace is not None:
cmd = cmd + ["-n", namespace]
if add_options is None:
add_options = []
cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
try:
process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
out, err = process.communicate(cmd)
if len(err) > 0:
if 'not found' in err:
return {'items': []}
if 'No resources found' in err:
return {'items': []}
raise Exception(err)
except Exception as excp:
err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
raise Exception(err)
return json.loads(out)
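# Illustrative note (not part of the module): oc_command() above shells out to
# something equivalent to
#   oc get routes -n logging --user=system:admin --config=/etc/origin/master/admin.kubeconfig \
#       -o json -l component=support,logging-infra=support,provider=openshift
# and returns the parsed JSON. The "logging" namespace is only an example; the
# kubeconfig path matches this module's default, and the selector shown is
# ROUTE_SELECTOR defined above.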
class OpenshiftLoggingFacts(OCBaseCommand):
''' The class structure for holding the OpenshiftLogging Facts'''
name = "facts"
def __init__(self, logger, binary, kubeconfig, namespace):
''' The init method for OpenshiftLoggingFacts '''
super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
self.logger = logger
self.facts = dict()
def default_keys_for(self, kind):
''' Sets the default key values for kind '''
for comp in COMPONENTS:
self.add_facts_for(comp, kind)
def add_facts_for(self, comp, kind, name=None, facts=None):
''' Add facts for the provided kind '''
if comp not in self.facts:
self.facts[comp] = dict()
if kind not in self.facts[comp]:
self.facts[comp][kind] = dict()
if name:
self.facts[comp][kind][name] = facts
def facts_for_routes(self, namespace):
''' Gathers facts for Routes in logging namespace '''
self.default_keys_for("routes")
route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
if len(route_list["items"]) == 0:
return None
for route in route_list["items"]:
name = route["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
self.facts["agl_namespace"] = namespace
def facts_for_daemonsets(self, namespace):
''' Gathers facts for Daemonsets in logging namespace '''
self.default_keys_for("daemonsets")
ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
if len(ds_list["items"]) == 0:
return
for ds_item in ds_list["items"]:
name = ds_item["metadata"]["name"]
comp = self.comp(name)
spec = ds_item["spec"]["template"]["spec"]
result = dict(
selector=ds_item["spec"]["selector"],
containers=dict(),
nodeSelector=spec["nodeSelector"],
serviceAccount=spec["serviceAccount"],
terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
)
for container in spec["containers"]:
result["containers"][container["name"]] = container
self.add_facts_for(comp, "daemonsets", name, result)
def facts_for_pvcs(self, namespace):
''' Gathers facts for PVCS in logging namespace'''
self.default_keys_for("pvcs")
pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
if len(pvclist["items"]) == 0:
return
for pvc in pvclist["items"]:
name = pvc["metadata"]["name"]
comp = self.comp(name)
self.add_facts_for(comp, "pvcs", name, dict())
def facts_for_deploymentconfigs(self, namespace):
''' Gathers facts for DeploymentConfigs in logging namespace '''
self.default_keys_for("deploymentconfigs")
dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
if len(dclist["items"]) == 0:
return
dcs = dclist["items"]
for dc_item in dcs:
name = dc_item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
spec = dc_item["spec"]["template"]["spec"]
facts = dict(
name=name,
selector=dc_item["spec"]["selector"],
replicas=dc_item["spec"]["replicas"],
serviceAccount=spec["serviceAccount"],
containers=dict(),
volumes=dict()
)
if "nodeSelector" in spec:
facts["nodeSelector"] = spec["nodeSelector"]
if "supplementalGroups" in spec["securityContext"]:
facts["storageGroups"] = spec["securityContext"]["supplementalGroups"]
facts["spec"] = spec
if "volumes" in spec:
for vol in spec["volumes"]:
clone = copy.deepcopy(vol)
clone.pop("name", None)
facts["volumes"][vol["name"]] = clone
for container in spec["containers"]:
facts["containers"][container["name"]] = container
self.add_facts_for(comp, "deploymentconfigs", name, facts)
def facts_for_services(self, namespace):
''' Gathers facts for services in logging namespace '''
self.default_keys_for("services")
servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
if len(servicelist["items"]) == 0:
return
for service in servicelist["items"]:
name = service["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "services", name, dict())
# pylint: disable=too-many-arguments
def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
'''Extracts facts in logging namespace from configmap'''
if yaml_file is not None:
if config_key.endswith(".yml") or config_key.endswith(".yaml"):
config_facts = yaml.load(yaml_file)
self.facts[comp][kind][name][config_key] = config_facts
self.facts[comp][kind][name][config_key]["raw"] = yaml_file
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
if comp in ["elasticsearch", "elasticsearch_ops"]:
for config_key in item["data"]:
self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
def facts_for_oauthclients(self, namespace):
''' Gathers facts for oauthclients used with logging '''
self.default_keys_for("oauthclients")
a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
result = dict(
redirectURIs=item["redirectURIs"]
)
self.add_facts_for(comp, "oauthclients", name, result)
def facts_for_secrets(self, namespace):
''' Gathers facts for secrets in the logging namespace '''
self.default_keys_for("secrets")
a_list = self.oc_command("get", "secrets", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None and item["type"] == "Opaque":
result = dict(
keys=item["data"].keys()
)
self.add_facts_for(comp, "secrets", name, result)
def facts_for_sccs(self):
''' Gathers facts for SCCs used with logging '''
self.default_keys_for("sccs")
scc = self.oc_command("get", "securitycontextconstraints.v1.security.openshift.io", name="privileged")
if len(scc["users"]) == 0:
return
for item in scc["users"]:
comp = self.comp(item)
if comp is not None:
self.add_facts_for(comp, "sccs", "privileged", dict())
def facts_for_clusterrolebindings(self, namespace):
''' Gathers ClusterRoleBindings used with logging '''
self.default_keys_for("clusterrolebindings")
role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
if "subjects" not in role or len(role["subjects"]) == 0:
return
for item in role["subjects"]:
comp = self.comp(item["name"])
if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
# this needs to end up nested under the service account...
def facts_for_rolebindings(self, namespace):
''' Gathers facts for RoleBindings used with logging '''
self.default_keys_for("rolebindings")
role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
if "subjects" not in role or len(role["subjects"]) == 0:
return
for item in role["subjects"]:
comp = self.comp(item["name"])
if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
# pylint: disable=no-self-use, too-many-return-statements
def comp(self, name):
''' Does a comparison to evaluate the logging component '''
if name.startswith("logging-curator-ops"):
return "curator_ops"
elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
return "kibana_ops"
elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
return "elasticsearch_ops"
elif name.startswith("logging-curator"):
return "curator"
elif name.startswith("logging-kibana") or name.startswith("kibana"):
return "kibana"
elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
return "elasticsearch"
elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
return "fluentd"
elif name.startswith("logging-mux"):
return "mux"
elif name.startswith("logging-eventrouter"):
return "eventrouter"
else:
return None
def build_facts(self):
''' Builds the logging facts and returns them '''
self.facts_for_routes(self.namespace)
self.facts_for_daemonsets(self.namespace)
self.facts_for_deploymentconfigs(self.namespace)
self.facts_for_services(self.namespace)
self.facts_for_configmaps(self.namespace)
self.facts_for_sccs()
self.facts_for_oauthclients(self.namespace)
self.facts_for_clusterrolebindings(self.namespace)
self.facts_for_rolebindings(self.namespace)
self.facts_for_secrets(self.namespace)
self.facts_for_pvcs(self.namespace)
return self.facts
def main():
''' The main method '''
module = AnsibleModule( # noqa: F405
argument_spec=dict(
admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
oc_bin={"required": True, "type": "str"},
openshift_logging_namespace={"required": True, "type": "str"}
),
supports_check_mode=False
)
try:
cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
module.params['openshift_logging_namespace'])
module.exit_json(
ansible_facts={"openshift_logging_facts": cmd.build_facts()}
)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
except Exception as error:
module.fail_json(msg=str(error))
if __name__ == '__main__':
main()
|
|
import os
def split_path(path):
folders = []
while True:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path != "":
folders.append(path)
break
folders.reverse()
return folders
def lex_file(path):
stream = []
try:
text = ""
with open(path, 'r') as f:
for line in f:
for char in line:
                    if text[-3:] == "\\[[":
                        text = text.replace("\\[[", "[[")
                    elif text[-3:] == "\\]]":
                        text = text.replace("\\]]", "]]")
                    elif text[-3:] == "\\{{":
                        text = text.replace("\\{{", "{{")
                    elif text[-3:] == "\\}}":
                        text = text.replace("\\}}", "}}")
elif text[-2:] == "[[": # key start
stream.append(text[:-2])
stream.append("[[")
text = ""
elif text[-2:] == "]]": # key end
stream.append(text[:-2])
stream.append("]]")
text = ""
elif text[-2:] == "{{": # left macro expansion
stream.append(text[:-2])
stream.append("{{")
text = ""
elif text[-2:] == "}}": # right macro expansion
stream.append(text[:-2])
stream.append("}}")
text = ""
text = text + char
stream.append(text)
    except IOError:
        # a missing or unreadable file simply yields an empty token stream
        pass
return stream
class Tree:
def __init__(self, path):
initial_name = split_path(path)[-1]
self.macros = {}
self.options = {}
self.model = {}
self.children = []
self.parent = None
self.file_paths = []
self.files = []
self.name = initial_name
for name in os.listdir(path):
next_path = os.path.join(path, name)
if os.path.isdir(next_path):
tree = Tree(next_path)
tree.parent = self
self.children.append(tree)
else:
if name == "macros":
self.macros = self.parse_file(next_path, False)
elif name == "options":
self.options = self.parse_file(next_path, False)
elif name == "model":
self.model = self.parse_file(next_path, True)
if "name" not in self.model:
raise ValueError("Model file must specify a `name`")
else:
self.file_paths.append(next_path)
def get_full_name(self):
result = []
full_name = ""
prefix_name = ""
tree = self
while tree.parent:
result.append(tree.name)
tree = tree.parent
result.append(tree.name)
for name in reversed(result):
full_name = full_name + name + "::"
for name in reversed(result):
prefix_name = prefix_name + name + "/"
return full_name[:-2], prefix_name
def expand_macro(self, name):
tree = self
while tree.parent:
if name in tree.macros:
return tree.macros[name]
tree = tree.parent
if name in tree.macros:
return tree.macros[name]
return None
def find_model(self):
tree = self
while tree.parent:
if tree.model:
return tree.model
tree = tree.parent
if tree.model:
return tree.model
raise Exception("No model could be found for file")
def fix_expanded_stream(self, stream):
result = []
text = ""
for token in stream:
if token == "[[":
result.append(text)
text = ""
result.append("[[")
elif token == "]]":
result.append(text)
text = ""
result.append("]]")
else:
text = text + token
result.append(text)
return result
def parse_file(self, path, allow_expansion=True):
stream = lex_file(path)
if len(stream) == 0:
raise ValueError("Lexer error, are you doing \
`[[key]] value {\{macro\}} value` ? file: {0}".format(path))
estream = []
ignore = []
text = {}
if allow_expansion:
for i in range(len(stream)):
if stream[i] == "{{":
if i + 1 >= len(stream):
raise ValueError( \
"Expected macro name after {{, \
file: {0}".format(path))
elif stream[i + 1] == "{{":
raise ValueError( \
"Can't have nested macros, file: {0}".format(path))
elif stream[i + 1] == "}}":
raise ValueError( \
"Macro name must be nonempty, \
file: {0}".format(path))
if i + 2 >= len(stream) or stream[i + 2] != "}}":
raise ValueError( \
"Expected closing }}, file: {0}".format(path))
value = self.expand_macro(stream[i + 1].strip())
if value:
estream.append(value)
ignore.append(i + 1)
else:
raise ValueError( \
"Macro name does not exist, \
file: {0}".format(path))
elif stream[i] != "}}" and i not in ignore:
estream.append(stream[i])
estream = self.fix_expanded_stream(estream)
else:
estream = stream
for i in range(len(estream)):
if estream[i] == "[[":
if i + 1 >= len(estream):
raise ValueError( \
"Expected key name after [[, file: {0}".format(path))
elif estream[i + 1] == "[[":
raise ValueError( \
"Can't have nested key declarations, \
file: {0}".format(path))
elif estream[i + 1] == "]]":
raise ValueError( \
"Key name must be nonempty, file: {0}".format(path))
if i + 2 >= len(estream) or estream[i + 2] != "]]":
raise ValueError( \
"Expected closing ]], file: {0}".format(path))
if i + 3 >= len(estream) or \
estream[i + 3] == "[[" or estream[i + 3] == "]]":
raise ValueError(
"Expected field value after key declaration, \
file: {0}".format(path))
text[estream[i + 1].strip()] = \
estream[i + 3].strip().encode("utf8")
if not text:
raise ValueError("Unexpected parser error, file: {0}".format(path))
return text
def parse(self):
for path in self.file_paths:
f = self.parse_file(path)
full_name, prefix_name = self.get_full_name()
f["Filename"] = prefix_name + split_path(path)[-1]
f["ffsDeckname"] = full_name
f["ffsModel"] = self.find_model()
self.files.append(f)
for child in self.children:
f = child.parse()
self.files.extend(f)
return self.files
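# Illustrative sketch (not part of the original module): source files are plain
# text with [[key]] markers and {{macro}} expansions, e.g.
#   [[Front]] What is {{greeting}}?
#   [[Back]] A salutation.
# where "greeting" must appear in a "macros" file somewhere up the directory
# tree. The entry point below is only a sketch; the root path is hypothetical.
def _example_parse(root_path='decks'):
    tree = Tree(root_path)  # walks the directory, loading macros/options/model files
    return tree.parse()     # flat list of per-file dicts incl. Filename/ffsDeckname/ffsModel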
|
|
import pytest
from flex.constants import (
STRING,
NULL,
NUMBER,
INTEGER,
OBJECT,
)
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.schema import schema_validator
from tests.utils import (
assert_path_not_in_errors,
assert_message_in_errors,
)
def test_minimum_and_maximum_are_not_required():
"""
Ensure that neither the `minimum` nor the `maximum` fields of a schema are
required.
"""
try:
schema_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('minimum', errors)
assert_path_not_in_errors('maximum', errors)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, True, False),
)
def test_minimum_for_invalid_types(value):
"""
Ensure that the value of `minimum` is validated to be numeric.
"""
with pytest.raises(ValidationError) as err:
schema_validator({'minimum': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'minimum.type',
)
@pytest.mark.parametrize(
'types',
(
INTEGER,
NUMBER,
(INTEGER, NUMBER),
(STRING, OBJECT, INTEGER, NULL),
),
)
def test_correct_types_for_minimum(types):
try:
schema_validator({
'minimum': 7,
'type': types,
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('minimum', errors)
assert_path_not_in_errors('type', errors)
@pytest.mark.parametrize(
'types',
(
STRING,
(NULL,),
(OBJECT, STRING),
),
)
def test_no_correct_type_for_minimum(types):
with pytest.raises(ValidationError) as err:
schema_validator({
'minimum': 7,
'type': types,
})
assert_message_in_errors(
MESSAGES['type']['invalid_type_for_minimum'],
err.value.detail,
'type',
)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, True, False),
)
def test_maximum_for_invalid_types(value):
"""
Ensure that the value of `maximum` is validated to be numeric.
"""
with pytest.raises(ValidationError) as err:
schema_validator({'maximum': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'maximum.type',
)
@pytest.mark.parametrize(
'types',
(
INTEGER,
NUMBER,
(INTEGER, NUMBER),
(STRING, OBJECT, INTEGER, NULL),
),
)
def test_correct_types_for_maximum(types):
try:
schema_validator({
'maximum': 7,
'type': types,
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('maximum', errors)
assert_path_not_in_errors('type', errors)
@pytest.mark.parametrize(
'types',
(
STRING,
(NULL,),
(OBJECT, STRING),
),
)
def test_no_correct_type_for_maximum(types):
with pytest.raises(ValidationError) as err:
schema_validator({
'maximum': 7,
'type': types,
})
assert_message_in_errors(
MESSAGES['type']['invalid_type_for_maximum'],
err.value.detail,
'type',
)
def test_minimum_is_required_if_exclusive_provided():
"""
Ensure that when `exclusiveMinimum` is set, that `minimum` is required.
"""
with pytest.raises(ValidationError) as err:
schema_validator(
{'exclusiveMinimum': True},
)
assert_message_in_errors(
MESSAGES['minimum']['exclusive_minimum_required_minimum'],
err.value.detail,
'minimum',
)
def test_maximum_is_required_if_exclusive_provided():
"""
Ensure that when `exclusiveMaximum` is set, that `maximum` is required.
"""
with pytest.raises(ValidationError) as err:
schema_validator(
{'exclusiveMaximum': True},
)
assert_message_in_errors(
MESSAGES['maximum']['exclusive_maximum_required_maximum'],
err.value.detail,
'maximum',
)
def test_maximum_must_be_greater_than_minimum():
"""
Test that the maximum value must be greater than or equal to the minimum.
"""
with pytest.raises(ValidationError) as err:
schema_validator({
'maximum': 10,
'minimum': 11,
})
assert_message_in_errors(
MESSAGES['maximum']['must_be_greater_than_minimum'],
err.value.detail,
'maximum',
)
def test_exclusive_minimum_and_maximum_are_not_required():
try:
schema_validator({})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('exclusiveMinimum', errors)
assert_path_not_in_errors('exclusiveMaximum', errors)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, 1, 1.1),
)
def test_exclusive_minimum_for_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({
'exclusiveMinimum': value,
})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'exclusiveMinimum.type',
)
@pytest.mark.parametrize(
'value',
('abc', [1, 2], None, {'a': 1}, 1, 1.1),
)
def test_exclusive_maximum_for_invalid_types(value):
with pytest.raises(ValidationError) as err:
schema_validator({
'exclusiveMaximum': value,
})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'exclusiveMaximum.type',
)
def test_exclusive_minimum_and_maximum_for_valid_values():
try:
schema_validator({'exclusiveMinimum': True, 'exclusiveMaximum': True})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors('exclusiveMinimum', errors)
assert_path_not_in_errors('exclusiveMaximum', errors)
|
|
#!/usr/bin/env python
import os, sys, subprocess, inspect, shutil, glob, optparse
ROOTDIR = os.path.abspath(os.path.dirname(inspect.getframeinfo(inspect.currentframe())[0]))
WAFPATH = os.path.join(ROOTDIR, 'var', 'waf')
LIBDIR = ''
INCLUDEDIR = ''
BINDIR = ''
PREFIX = ROOTDIR
ARCH = ''
MSVC = ''
def log(msg):
sys.stdout.write(msg)
sys.stdout.flush()
def get_msvctarget():
global ARCH
vctarget = ''
if ARCH == 'x64': vctarget='x86_amd64' # use cross-compiler for compatibility
elif ARCH == 'x86': vctarget='x86'
return vctarget
def get_msvccompiler():
global MSVC
compilers = {'9': 'msvc 9.0', '10': 'msvc 10.0', '11': 'msvc 11.0', '12': 'msvc 12.0'}
return compilers[MSVC]
def install_lua():
lua_srcfiles = {\
'x86-vc12': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win32_dll12_lib.zip/download',
'x64-vc12': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win64_dll12_lib.zip/download',
'x64-vc11': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win64_dll11_lib.zip/download',
'x86-vc11': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win32_dll11_lib.zip/download',
'x86-vc9': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win32_dll9_lib.zip/download',
'x64-vc9': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win64_dll9_lib.zip/download',
'x64-vc10': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win64_dll10_lib.zip/download',
'x86-vc10': 'http://sourceforge.net/projects/luabinaries/files/5.2.1/Windows%20Libraries/Dynamic/lua-5.2.1_Win32_dll10_lib.zip/download'
}
luadir = os.path.join(ROOTDIR, '3rdparty', 'tmp', 'lua')
libfile = os.path.join(LIBDIR, 'lua.lib')
log('looking for lua...')
if os.path.isfile(libfile):
log('\t\tfound\n')
return True
log('\t\tnot found\n')
url = lua_srcfiles[ARCH + '-vc' + MSVC]
log('downloading lua binaries from "http://sourceforge.net/projects/luabinaries"...\n')
log('')
os.makedirs(luadir, exist_ok=True)
os.chdir(luadir)
if os.system('wget -N --no-check-certificate {0}'.format(url)) != 0:
os.chdir(ROOTDIR)
return False
# extract file name from url
urlsplit = url.split('/')
filename = ''
for u in urlsplit:
if '.zip' in u:
filename = u
break
if os.system('unzip -o ' + filename) != 0:
os.chdir(ROOTDIR)
return False
# copy important files
shutil.copyfile('lua52.dll', os.path.join(BINDIR, 'lua52.dll'))
shutil.copyfile('lua52.lib', os.path.join(LIBDIR, 'lua.lib'))
# headers
includes = os.path.join(INCLUDEDIR, 'lua')
headers = glob.glob('include/*.h')
os.makedirs(includes, exist_ok=True)
for header in headers:
shutil.copyfile(header, os.path.join(includes, os.path.basename(header)))
os.chdir(ROOTDIR)
return True
def install_assimp():
log('looking for assimp...')
if os.path.isfile(os.path.join(LIBDIR, 'assimp.lib')):
log('\t\tfound\n')
return True
log('\t\tnot found\n')
url = 'http://sourceforge.net/projects/assimp/files/assimp-3.1/assimp-3.1.1-win-binaries.zip/download'
log('downloading assimp binaries from "http://sourceforge.net/projects/assimp"...\n')
assimpdir = os.path.join(ROOTDIR, '3rdparty', 'tmp', 'assimp')
os.makedirs(assimpdir, exist_ok=True)
os.chdir(assimpdir)
if os.system('wget -N --no-check-certificate {0}'.format(url)) != 0:
os.chdir(ROOTDIR)
return False
# extract file name from url
urlsplit = url.split('/')
filename = ''
for u in urlsplit:
if '.zip' in u:
filename = u
break
if os.system('unzip -o ' + filename) != 0:
os.chdir(ROOTDIR)
return False
os.chdir('assimp-3.1.1-win-binaries')
# copy important files
# libs
bindirs = {'x64': 'bin64', 'x86': 'bin32'}
    libdirs = {'x64': 'lib64', 'x86': 'lib32'}
dlls = glob.glob(os.path.join(bindirs[ARCH], '*.dll'))
libs = glob.glob(os.path.join(libdirs[ARCH], '*.lib'))
print()
for lib in libs:
shutil.copyfile(lib, os.path.join(LIBDIR, os.path.basename(lib)))
for dll in dlls:
shutil.copyfile(dll, os.path.join(BINDIR, os.path.basename(dll)))
# headers
includes = os.path.join(INCLUDEDIR, 'assimp')
headers = glob.glob('include/assimp/*')
os.makedirs(includes, exist_ok=True)
for header in headers:
if os.path.isfile(header):
shutil.copyfile(header, os.path.join(includes, os.path.basename(header)))
os.makedirs(os.path.join(includes, 'Compiler'), exist_ok=True)
headers = glob.glob('include/assimp/Compiler/*')
for header in headers:
shutil.copyfile(header, os.path.join(includes, 'Compiler', os.path.basename(header)))
os.chdir(ROOTDIR)
return True
def install_glfw():
log('looking for glfw...')
if os.path.isfile(os.path.join(LIBDIR, 'glfw.lib')) and \
os.path.isfile(os.path.join(BINDIR, 'glfw3.dll')):
log('\t\tfound\n')
return True
log('\t\tnot found\n')
urls = {\
'x86': 'http://sourceforge.net/projects/glfw/files/glfw/3.0.4/glfw-3.0.4.bin.WIN32.zip/download',
'x64': 'http://sourceforge.net/projects/glfw/files/glfw/3.0.4/glfw-3.0.4.bin.WIN64.zip/download'}
log('downloading glfw binaries from "http://www.glfw.org"...\n')
glfwdir = os.path.join(ROOTDIR, '3rdparty', 'tmp', 'glfw')
os.makedirs(glfwdir, exist_ok=True)
os.chdir(glfwdir)
if os.system('wget -N --no-check-certificate {0}'.format(urls[ARCH])) != 0:
os.chdir(ROOTDIR)
return False
# extract file name from url
urlsplit = urls[ARCH].split('/')
filename = ''
for u in urlsplit:
if '.zip' in u:
filename = u
break
if os.system('unzip -o %s' % filename) != 0:
os.chdir(ROOTDIR)
return False
dirname = os.path.splitext(filename)[0]
os.chdir(dirname)
# copy important files
# libs
shutil.copyfile('lib-msvc%s0/glfw3.dll' % MSVC, os.path.join(BINDIR, 'glfw3.dll'))
shutil.copyfile('lib-msvc%s0/glfw3dll.lib' % MSVC, os.path.join(LIBDIR, 'glfw.lib'))
# headers
includes = os.path.join(INCLUDEDIR, 'GLFW')
headers = glob.glob('include/GLFW/*.h')
os.makedirs(includes, exist_ok=True)
for header in headers:
shutil.copyfile(header, os.path.join(includes, os.path.basename(header)))
os.chdir(ROOTDIR)
return True
def install_glew():
log('looking for glew...')
if os.path.isfile(os.path.join(LIBDIR, 'glew.lib')):
log('\t\tfound\n')
return True
log('\t\tnot found\n')
url = 'https://sourceforge.net/projects/glew/files/glew/1.10.0/glew-1.10.0-win32.zip/download'
log('downloading glew binaries from "https://sourceforge.net/projects/glew"...\n')
glewdir = os.path.join(ROOTDIR, '3rdparty', 'tmp', 'glew')
os.makedirs(glewdir, exist_ok=True)
os.chdir(glewdir)
if os.system('wget -N --no-check-certificate {0}'.format(url)) != 0:
os.chdir(ROOTDIR)
return False
# extract file name from url
urlsplit = url.split('/')
filename = ''
for u in urlsplit:
if '.zip' in u:
filename = u
break
if os.system('unzip -o ' + filename) != 0:
os.chdir(ROOTDIR)
return False
dirs = glob.glob('*')
for d in dirs:
if os.path.isdir(d):
os.chdir(d)
break
# copy important files
dirs = {'x64': 'x64', 'x86': 'Win32'}
d = dirs[ARCH]
# libs
shutil.copyfile(os.path.join('bin', 'Release', d, 'glew32.dll'),
os.path.join(BINDIR, 'glew32.dll'))
shutil.copyfile(os.path.join('lib', 'Release', d, 'glew32.lib'),
os.path.join(LIBDIR, 'glew.lib'))
# headers
includes = os.path.join(INCLUDEDIR, 'GL')
headers = glob.glob('include/GL/*.h')
os.makedirs(includes, exist_ok=True)
for header in headers:
shutil.copyfile(header, os.path.join(includes, os.path.basename(header)))
os.chdir(ROOTDIR)
return True
def install_efsw():
log('looking for efsw...')
if os.path.isfile(os.path.join(LIBDIR, 'efsw.lib')) and \
os.path.isfile(os.path.join(BINDIR, 'efsw.dll')):
log('\t\tfound\n')
return True
log('\t\tnot found\n')
url = 'https://bitbucket.org/sepul/efsw/get/5de4baca1a60.zip'
log('downloading efsw source from "https://bitbucket.org/sepul/efsw"...\n')
efswdir = os.path.join(ROOTDIR, '3rdparty', 'tmp', 'efsw')
os.makedirs(efswdir, exist_ok=True)
os.chdir(efswdir)
if os.system('wget -N --no-check-certificate {0}'.format(url)) != 0:
os.chdir(ROOTDIR)
return False
if os.system('unzip -o ' + os.path.basename(url)) != 0:
os.chdir(ROOTDIR)
return False
name = os.path.splitext(os.path.basename(url))[0]
dirname = 'sepul-efsw-' + name
os.chdir(dirname)
if os.system('python {0} configure --msvc_version="{1}" --msvc_targets={2}'.format(\
WAFPATH, get_msvccompiler(), ARCH)) != 0:
if os.system('python {0} configure --msvc_version="{1}" --msvc_targets={2}'.format(\
WAFPATH, get_msvccompiler(), get_msvctarget())) != 0:
os.chdir(ROOTDIR)
return False
if os.system('python {0} build install'.format(WAFPATH)) != 0:
os.chdir(ROOTDIR)
return False
# copy important files
# libs
shutil.copyfile('bin/efsw.dll', os.path.join(BINDIR, 'efsw.dll'))
shutil.copyfile('build/release/efsw.lib', os.path.join(LIBDIR, 'efsw.lib'))
# headers
includes = os.path.join(INCLUDEDIR, 'efsw')
headers = glob.glob('include/efsw/*.h*')
os.makedirs(includes, exist_ok=True)
for header in headers:
shutil.copyfile(header, os.path.join(includes, os.path.basename(header)))
os.chdir(ROOTDIR)
return True
def main():
parser = optparse.OptionParser()
parser.add_option('--prefix', action='store', type='string', dest='PREFIX',
help='prefix path for existing and to be installed libs', default='')
parser.add_option('--msvc', action='store', type='choice', choices=['9', '10', '11', '12'],
dest='MSVC', help='define visual studio version (active compiler)')
parser.add_option('--arch', action='store', type='choice', choices=['x86', 'x64'],
dest='ARCH', help='define target architecture that you want to build')
(options, args) = parser.parse_args()
if not options.ARCH:
parser.error('--arch argument is not given')
if not options.MSVC:
parser.error('--msvc argument is not given')
global LIBDIR, INCLUDEDIR, PREFIX, MSVC, ARCH, BINDIR
PREFIX = os.path.abspath(options.PREFIX)
LIBDIR = os.path.join(PREFIX, 'lib')
INCLUDEDIR = os.path.join(PREFIX, 'include')
BINDIR = os.path.join(PREFIX, 'bin')
ARCH = options.ARCH
MSVC = options.MSVC
log('library install path: ' + LIBDIR + '\n')
    log('binary install path: ' + BINDIR + '\n')
log('include install path: ' + INCLUDEDIR + '\n')
os.makedirs(INCLUDEDIR, exist_ok=True)
os.makedirs(LIBDIR, exist_ok=True)
os.makedirs(BINDIR, exist_ok=True)
if not install_lua():
log('error: could not install lua\n')
return False
if not install_assimp():
log('error: could not install assimp\n')
return False
if not install_glfw():
log('error: could not install glfw\n')
return False
if not install_glew():
log('error: could not install glew\n')
return False
if not install_efsw():
log('error: could not install efsw\n')
return False
    log('ok, ready for build.\n')
    return True
r = main()
if r: sys.exit(0)
else: sys.exit(-1)
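# Typical invocation (illustrative; the script's file name is hypothetical):
#   python install_windows_deps.py --msvc 12 --arch x64 --prefix C:\dev\3rdparty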
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import functools
import hashlib
import json
import os
import re
import sys
import threading
import urllib.parse
import uuid
if os.name == 'nt': # noqa
import msvcrt # noqa
else: # noqa
msvcrt = None # noqa
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import wrapt
from glanceclient._i18n import _
from glanceclient import exc
_memoized_property_lock = threading.Lock()
SENSITIVE_HEADERS = ('X-Auth-Token', )
REQUIRED_FIELDS_ON_DATA = ('disk_format', 'container_format')
# Decorator for cli-args
def arg(*args, **kwargs):
def _decorator(func):
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
return func
return _decorator
def on_data_require_fields(data_fields, required=REQUIRED_FIELDS_ON_DATA):
"""Decorator to check commands' validity
This decorator checks that required fields are present when image
    data has been supplied via command line arguments or via stdin.
    On error, raises a CommandError exception with a meaningful message.
:param data_fields: Which fields' presence imply image data
:type data_fields: iter
:param required: Required fields
:type required: iter
:return: function decorator
"""
def args_decorator(func):
def prepare_fields(fields):
args = ('--' + x.replace('_', '-') for x in fields)
return ', '.join(args)
@functools.wraps(func)
def func_wrapper(gc, args):
# Set of arguments with data
fields = set(a[0] for a in vars(args).items() if a[1])
# Fields the conditional requirements depend on
present = fields.intersection(data_fields)
# How many conditional requirements are missing
missing = set(required) - fields
# We use get_data_file to check if data is provided in stdin
if (present or get_data_file(args)) and missing:
msg = (_("error: Must provide %(req)s when using %(opt)s.") %
{'req': prepare_fields(missing),
'opt': prepare_fields(present) or 'stdin'})
raise exc.CommandError(msg)
return func(gc, args)
return func_wrapper
return args_decorator
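# Illustrative sketch (not part of the original module): how a shell command
# handler might be guarded by the decorator above. The handler name and the
# 'file' data field are hypothetical stand-ins for the real CLI handlers.
@on_data_require_fields(data_fields=['file'])
def _example_do_image_create(gc, args):
    """Hypothetical handler: the decorator rejects the call unless
    --disk-format and --container-format accompany the image data."""
    pass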
def schema_args(schema_getter, omit=None):
omit = omit or []
typemap = {
'string': encodeutils.safe_decode,
'integer': int,
'boolean': strutils.bool_from_string,
'array': list
}
def _decorator(func):
schema = schema_getter()
if schema is None:
param = '<unavailable>'
kwargs = {
'help': ("Please run with connection parameters set to "
"retrieve the schema for generating help for this "
"command")
}
func.__dict__.setdefault('arguments', []).insert(0, ((param, ),
kwargs))
else:
properties = schema.get('properties', {})
for name, property in properties.items():
if name in omit:
continue
param = '--' + name.replace('_', '-')
kwargs = {}
type_str = property.get('type', 'string')
if isinstance(type_str, list):
# NOTE(flaper87): This means the server has
# returned something like `['null', 'string']`,
# therefore we use the first non-`null` type as
# the valid type.
for t in type_str:
if t != 'null':
type_str = t
break
if type_str == 'array':
items = property.get('items')
kwargs['type'] = typemap.get(items.get('type'))
kwargs['nargs'] = '+'
else:
kwargs['type'] = typemap.get(type_str)
if type_str == 'boolean':
kwargs['metavar'] = '[True|False]'
else:
kwargs['metavar'] = '<%s>' % name.upper()
description = property.get('description', "")
if 'enum' in property:
if len(description):
description += " "
# NOTE(flaper87): Make sure all values are `str/unicode`
# for the `join` to succeed. Enum types can also be `None`
# therefore, join's call would fail without the following
# list comprehension
vals = [str(val) for val in property.get('enum')]
description += ('Valid values: ' + ', '.join(vals))
kwargs['help'] = description
func.__dict__.setdefault('arguments',
[]).insert(0, ((param, ), kwargs))
return func
return _decorator
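# Illustrative sketch (not part of the original module): schema_args expects a
# zero-argument callable returning a JSON-schema-like dict. The schema and
# handler below are made up; real schemas are fetched from the server.
def _example_schema_getter():
    return {'properties': {'name': {'type': 'string', 'description': 'Image name'}}}

@schema_args(_example_schema_getter)
def _example_do_image_update(gc, args):
    pass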
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def has_version(client, version):
versions = client.get('/versions')[1].get('versions')
supported = ['SUPPORTED', 'CURRENT', 'EXPERIMENTAL']
for version_struct in versions:
if version_struct['id'] == version:
return version_struct['status'] in supported
return False
def print_cached_images(cached_images):
cache_pt = prettytable.PrettyTable(("ID",
"State",
"Last Accessed (UTC)",
"Last Modified (UTC)",
"Size",
"Hits"))
for item in cached_images:
state = "queued"
last_accessed = "N/A"
last_modified = "N/A"
size = "N/A"
hits = "N/A"
if item == 'cached_images':
state = "cached"
for image in cached_images[item]:
last_accessed = image['last_accessed']
if last_accessed == 0:
last_accessed = "N/A"
else:
last_accessed = datetime.datetime.utcfromtimestamp(
last_accessed).isoformat()
cache_pt.add_row((image['image_id'], state,
last_accessed,
datetime.datetime.utcfromtimestamp(
image['last_modified']).isoformat(),
image['size'],
image['hits']))
else:
for image in cached_images[item]:
cache_pt.add_row((image,
state,
last_accessed,
last_modified,
size,
hits))
print(cache_pt.get_string())
def print_dict_list(objects, fields):
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.align = 'l'
for o in objects:
row = []
for field in fields:
field_name = field.lower().replace(' ', '_')
# NOTE (abhishekk) mapping field to actual name in the
# structure.
if field_name == 'task_id':
field_name = 'id'
data = o.get(field_name, '')
row.append(data)
pt.add_row(row)
print(encodeutils.safe_decode(pt.get_string()))
def print_list(objs, fields, formatters=None, field_settings=None):
'''Prints a list of objects.
@param objs: Objects to print
@param fields: Fields on each object to be printed
@param formatters: Custom field formatters
'''
formatters = formatters or {}
field_settings = field_settings or {}
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in field_settings:
for setting, value in field_settings[field].items():
setting_dict = getattr(pt, setting)
setting_dict[field] = value
if field in formatters:
row.append(formatters[field](o))
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, None) or ''
row.append(data)
        # Convert any list values in the row to plain strings
        for idx, part in enumerate(row):
            if isinstance(part, list):
                row[idx] = unicode_key_value_to_string(part)
pt.add_row(row)
print(encodeutils.safe_decode(pt.get_string()))
def _encode(src):
"""remove extra 'u' in PY2."""
return src
def unicode_key_value_to_string(src):
"""Recursively converts dictionary keys to strings."""
if isinstance(src, dict):
return dict((_encode(k),
_encode(unicode_key_value_to_string(v)))
for k, v in src.items())
if isinstance(src, list):
return [unicode_key_value_to_string(l) for l in src]
return _encode(src)
def print_dict(d, max_column_width=80):
pt = prettytable.PrettyTable(['Property', 'Value'], caching=False)
pt.align = 'l'
pt.max_width = max_column_width
for k, v in d.items():
if isinstance(v, (dict, list)):
v = json.dumps(v)
pt.add_row([k, v])
print(encodeutils.safe_decode(pt.get_string(sortby='Property')))
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exc.NotFound:
pass
# now try to get entity as uuid
try:
# This must be unicode for Python 3 compatibility.
# If you pass a bytestring to uuid.UUID, you will get a TypeError
uuid.UUID(encodeutils.safe_decode(name_or_id))
return manager.get(name_or_id)
except (ValueError, exc.NotFound):
pass
# finally try to find entity by name
matches = list(manager.list(filters={'name': name_or_id}))
num_matches = len(matches)
if num_matches == 0:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exc.CommandError(msg)
elif num_matches > 1:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exc.CommandError(msg)
else:
return matches[0]
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
def exit(msg='', exit_code=1):
if msg:
print_err(msg)
sys.exit(exit_code)
def print_err(msg):
print(encodeutils.safe_decode(msg), file=sys.stderr)
def save_image(data, path):
"""Save an image to the specified path.
:param data: binary data of the image
:param path: path to save the image to
"""
if path is None:
# NOTE(kragniz): for py3 compatibility: sys.stdout.buffer is only
# present on py3, otherwise fall back to sys.stdout
image = getattr(sys.stdout, 'buffer',
sys.stdout)
else:
image = open(path, 'wb')
try:
for chunk in data:
image.write(chunk)
finally:
if path is not None:
image.close()
def make_size_human_readable(size):
suffix = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
base = 1024.0
index = 0
if size is None:
size = 0
while size >= base:
index = index + 1
size = size / base
padded = '%.1f' % size
stripped = padded.rstrip('0').rstrip('.')
return '%s%s' % (stripped, suffix[index])
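def _example_size_formatting():
    # A minimal sanity sketch (not part of the original module) exercising
    # make_size_human_readable with a few representative inputs.
    assert make_size_human_readable(0) == '0B'
    assert make_size_human_readable(1024) == '1kB'
    assert make_size_human_readable(1536) == '1.5kB'
    assert make_size_human_readable(10 * 1024 ** 3) == '10GB'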
def get_file_size(file_obj):
"""Analyze file-like object and attempt to determine its size.
:param file_obj: file-like object.
:retval: The file's size or None if it cannot be determined.
"""
if (hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell') and
file_obj.seekable()):
try:
curr = file_obj.tell()
file_obj.seek(0, os.SEEK_END)
size = file_obj.tell()
file_obj.seek(curr)
return size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the file object
# is a pipe (e.g. the user is trying
# to pipe image data to the client,
# echo testdata | bin/glance add blah...), or
# that file object is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return
else:
raise
def get_data_file(args):
if args.file:
return open(args.file, 'rb')
else:
# distinguish cases where:
# (1) stdin is not valid (as in cron jobs):
# glance ... <&-
# (2) image data is provided through standard input:
# glance ... < /tmp/file or cat /tmp/file | glance ...
# (3) no image data provided:
# glance ...
try:
os.fstat(0)
except OSError:
# (1) stdin is not valid (closed...)
return None
if not sys.stdin.isatty():
# (2) image data is provided through standard input
image = sys.stdin
if hasattr(sys.stdin, 'buffer'):
image = sys.stdin.buffer
if msvcrt:
msvcrt.setmode(image.fileno(), os.O_BINARY)
return image
else:
# (3) no image data provided
return None
def strip_version(endpoint):
"""Strip version from the last component of endpoint if present."""
# NOTE(flaper87): This shouldn't be necessary if
# we make endpoint the first argument. However, we
# can't do that just yet because we need to keep
# backwards compatibility.
if not isinstance(endpoint, str):
raise ValueError("Expected endpoint")
version = None
# Get rid of trailing '/' if present
endpoint = endpoint.rstrip('/')
url_parts = urllib.parse.urlparse(endpoint)
(scheme, netloc, path, __, __, __) = url_parts
path = path.lstrip('/')
# regex to match 'v1' or 'v2.0' etc
if re.match(r'v\d+\.?\d*', path):
version = float(path.lstrip('v'))
endpoint = scheme + '://' + netloc
return endpoint, version
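def _example_strip_version():
    # A minimal sketch (not part of the original module) showing the two shapes
    # of result strip_version produces; the endpoint URLs are hypothetical.
    assert strip_version('http://localhost:9292/v2.0/') == ('http://localhost:9292', 2.0)
    assert strip_version('http://localhost:9292') == ('http://localhost:9292', None)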
def print_image(image_obj, human_readable=False, max_col_width=None):
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image_obj.items()
if item[0] not in ignore])
if 'virtual_size' in image:
image['virtual_size'] = image.get('virtual_size') or 'Not available'
if human_readable:
image['size'] = make_size_human_readable(image['size'])
if str(max_col_width).isdigit():
print_dict(image, max_column_width=max_col_width)
else:
print_dict(image)
def integrity_iter(iter, checksum):
"""Check image data integrity.
:raises: IOError
"""
try:
md5sum = hashlib.new('md5')
except ValueError:
raise IOError(errno.EPIPE,
'Corrupt image download. Expected checksum is %s '
'but md5 algorithm is not available on the client' %
checksum)
for chunk in iter:
yield chunk
if isinstance(chunk, str):
chunk = bytes(chunk, 'latin-1')
md5sum.update(chunk)
md5sum = md5sum.hexdigest()
if md5sum != checksum:
raise IOError(errno.EPIPE,
'Corrupt image download. Checksum was %s expected %s' %
(md5sum, checksum))
def serious_integrity_iter(iter, hasher, hash_value):
"""Check image data integrity using the Glance "multihash".
:param iter: iterable containing the image data
:param hasher: a hashlib object
:param hash_value: hexdigest of the image data
:raises: IOError if the hashdigest of the data is not hash_value
"""
for chunk in iter:
yield chunk
if isinstance(chunk, str):
chunk = bytes(chunk, 'latin-1')
hasher.update(chunk)
computed = hasher.hexdigest()
if computed != hash_value:
raise IOError(errno.EPIPE,
'Corrupt image download. Hash was %s expected %s' %
(computed, hash_value))
def memoized_property(fn):
attr_name = '_lazy_once_' + fn.__name__
@property
def _memoized_property(self):
if hasattr(self, attr_name):
return getattr(self, attr_name)
else:
with _memoized_property_lock:
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _memoized_property
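# Illustrative sketch (not part of the original module): a class whose
# expensive attribute is computed once, then cached under a thread lock by
# memoized_property. The attribute and its value are hypothetical.
class _ExampleClient(object):
    @memoized_property
    def schema(self):
        return {'properties': {}}  # stand-in for an expensive lookup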
def safe_header(name, value):
    if value is not None and name in SENSITIVE_HEADERS:
        # hashlib requires bytes; header values may arrive as text
        encoded = value.encode('utf-8') if isinstance(value, str) else value
        h = hashlib.sha1(encoded)
d = h.hexdigest()
return name, "{SHA1}%s" % d
else:
return name, value
def endpoint_version_from_url(endpoint, default_version=None):
if endpoint:
endpoint, version = strip_version(endpoint)
return endpoint, version or default_version
else:
return None, default_version
def debug_enabled(argv):
if bool(env('GLANCECLIENT_DEBUG')) is True:
return True
if '--debug' in argv or '-d' in argv:
return True
return False
class IterableWithLength(object):
def __init__(self, iterable, length):
self.iterable = iterable
self.length = length
def __iter__(self):
try:
for chunk in self.iterable:
yield chunk
finally:
self.iterable.close()
def next(self):
return next(self.iterable)
# In Python 3, __next__() has replaced next().
__next__ = next
def __len__(self):
return self.length
class RequestIdProxy(wrapt.ObjectProxy):
def __init__(self, wrapped):
# `wrapped` is a tuple: (original_obj, response_obj)
super(RequestIdProxy, self).__init__(wrapped[0])
self._self_wrapped = wrapped[0]
req_id = _extract_request_id(wrapped[1])
self._self_request_ids = [req_id]
@property
def request_ids(self):
return self._self_request_ids
@property
def wrapped(self):
return self._self_wrapped
    # Overridden next method to act as an iterator
def next(self):
return next(self._self_wrapped)
# In Python 3, __next__() has replaced next().
__next__ = next
class GeneratorProxy(wrapt.ObjectProxy):
def __init__(self, wrapped):
super(GeneratorProxy, self).__init__(wrapped)
self._self_wrapped = wrapped
self._self_request_ids = []
def _set_request_ids(self, resp):
if self._self_request_ids == []:
req_id = _extract_request_id(resp)
self._self_request_ids = [req_id]
def _next(self):
obj, resp = next(self._self_wrapped)
self._set_request_ids(resp)
return obj
# Override generator's next method to add
# request id on each iteration
def next(self):
return self._next()
# For Python 3 compatibility
def __next__(self):
return self._next()
def __iter__(self):
return self
@property
def request_ids(self):
return self._self_request_ids
@property
def wrapped(self):
return self._self_wrapped
def add_req_id_to_object():
@wrapt.decorator
def inner(wrapped, instance, args, kwargs):
return RequestIdProxy(wrapped(*args, **kwargs))
return inner
def add_req_id_to_generator():
@wrapt.decorator
def inner(wrapped, instance, args, kwargs):
return GeneratorProxy(wrapped(*args, **kwargs))
return inner
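# Illustrative sketch (not part of the original module): the decorators above
# are meant for manager methods that return (result, http_response) pairs. The
# manager and response objects below are stand-ins, not real glanceclient types.
class _FakeResponse(object):
    headers = {'x-openstack-request-id': 'req-example'}

class _ExampleManager(object):
    @add_req_id_to_object()
    def get(self, image_id):
        return ({'id': image_id}, _FakeResponse())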
def _extract_request_id(resp):
# TODO(rsjethani): Do we need more checks here?
return resp.headers.get('x-openstack-request-id')
|
|
"""test sparse matrix construction functions"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, matrix
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_equal, assert_raises, assert_array_almost_equal_nulp
from scipy.sparse import csr_matrix, coo_matrix
from scipy.sparse import construct
from scipy.sparse.construct import rand as sprand
sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']
#TODO check whether format=XXX is respected
class TestConstructUtils(TestCase):
def test_spdiags(self):
diags1 = array( [[ 1, 2, 3, 4, 5]] )
diags2 = array( [[ 1, 2, 3, 4, 5],
[ 6, 7, 8, 9,10]] )
diags3 = array( [[ 1, 2, 3, 4, 5],
[ 6, 7, 8, 9,10],
[11,12,13,14,15]] )
cases = []
cases.append( (diags1, 0, 1, 1, [[1]]) )
cases.append( (diags1, [0], 1, 1, [[1]]) )
cases.append( (diags1, [0], 2, 1, [[1],[0]]) )
cases.append( (diags1, [0], 1, 2, [[1,0]]) )
cases.append( (diags1, [1], 1, 2, [[0,2]]) )
cases.append( (diags1,[-1], 1, 2, [[0,0]]) )
cases.append( (diags1, [0], 2, 2, [[1,0],[0,2]]) )
cases.append( (diags1,[-1], 2, 2, [[0,0],[1,0]]) )
cases.append( (diags1, [3], 2, 2, [[0,0],[0,0]]) )
cases.append( (diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]) )
cases.append( (diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]) )
cases.append( (diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]) )
cases.append( (diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]) )
cases.append( (diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]) )
cases.append( (diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
[0,0,0,4,0,0],
[0,0,0,0,5,0],
[6,0,0,0,0,0],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]) )
cases.append( (diags3, [-1,0,1], 6, 6, [[ 6,12, 0, 0, 0, 0],
[ 1, 7,13, 0, 0, 0],
[ 0, 2, 8,14, 0, 0],
[ 0, 0, 3, 9,15, 0],
[ 0, 0, 0, 4,10, 0],
[ 0, 0, 0, 0, 5, 0]]) )
cases.append( (diags3, [-4,2,-1], 6, 5, [[ 0, 0, 8, 0, 0],
[11, 0, 0, 9, 0],
[ 0,12, 0, 0,10],
[ 0, 0,13, 0, 0],
[ 1, 0, 0,14, 0],
[ 0, 2, 0, 0,15]]) )
for d,o,m,n,result in cases:
assert_equal( construct.spdiags(d,o,m,n).todense(), result )
def test_diags(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append( (a[:1], 0, (1, 1), [[1]]) )
cases.append( ([a[:1]], [0], (1, 1), [[1]]) )
cases.append( ([a[:1]], [0], (2, 1), [[1],[0]]) )
cases.append( ([a[:1]], [0], (1, 2), [[1,0]]) )
cases.append( ([a[:1]], [1], (1, 2), [[0,1]]) )
cases.append( ([a[:2]], [0], (2, 2), [[1,0],[0,2]]) )
cases.append( ([a[:1]],[-1], (2, 2), [[0,0],[1,0]]) )
cases.append( ([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]) )
cases.append( ([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]) )
cases.append( ([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]) )
cases.append( ([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]) )
cases.append( ([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]) )
cases.append( ([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
[0,0,0,2,0,0],
[0,0,0,0,3,0],
[6,0,0,0,0,4],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]) )
cases.append( ([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[ 6,11, 0, 0, 0],
[ 1, 7,12, 0, 0],
[ 0, 2, 8,13, 0],
[ 0, 0, 3, 9,14],
[ 0, 0, 0, 4,10]]) )
cases.append( ([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[ 0, 0, 6, 0, 0],
[11, 0, 0, 7, 0],
[ 0,12, 0, 0, 8],
[ 0, 0,13, 0, 0],
[ 1, 0, 0,14, 0],
[ 0, 2, 0, 0,15]]) )
# scalar case: broadcasting
cases.append( ([1,-2,1], [1,0,-1], (3, 3), [[ -2, 1, 0],
[ 1, -2, 1],
[ 0, 1, -2]]) )
for d, o, shape, result in cases:
try:
assert_equal(construct.diags(d, o, shape=shape).todense(),
result)
if shape[0] == shape[1] and hasattr(d[0], '__len__'):
# should be able to find the shape automatically
assert_equal(construct.diags(d, o).todense(), result)
except:
print("%r %r %r" % (d, o, shape))
raise
def test_diags_bad(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append( ([a[:0]], 0, (1, 1)) )
cases.append( ([a], [0], (1, 1)) )
cases.append( ([a[:3],b], [0,2], (3, 3)) )
cases.append( ([a[:4],b,c[:3]], [-1,0,1], (5, 5)) )
cases.append( ([a[:2],c,b[:3]], [-4,2,-1], (6, 5)) )
cases.append( ([a[:2],c,b[:3]], [-4,2,-1], None) )
cases.append( ([], [-4,2,-1], None) )
cases.append( ([1], [-4], (4, 4)) )
cases.append( ([a[:0]], [-1], (1, 2)) )
cases.append( ([a], 0, None))
for d, o, shape in cases:
try:
assert_raises(ValueError, construct.diags, d, o, shape)
except:
print("%r %r %r" % (d, o, shape))
raise
assert_raises(TypeError, construct.diags, [[None]], [0])
def test_diags_vs_diag(self):
# Check that
#
# diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
#
np.random.seed(1234)
for n_diags in [1, 2, 3, 4, 5, 10]:
n = 1 + n_diags//2 + np.random.randint(0, 10)
offsets = np.arange(-n+1, n-1)
np.random.shuffle(offsets)
offsets = offsets[:n_diags]
diagonals = [np.random.rand(n - abs(q)) for q in offsets]
mat = construct.diags(diagonals, offsets)
dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
if len(offsets) == 1:
mat = construct.diags(diagonals[0], offsets[0])
dense_mat = np.diag(diagonals[0], offsets[0])
assert_array_almost_equal_nulp(mat.todense(), dense_mat)
def test_diags_dtype(self):
x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)
assert_equal(x.dtype, int)
assert_equal(x.todense(), [[2, 0], [0, 2]])
def test_diags_one_diagonal(self):
d = list(range(5))
for k in range(-5, 6):
assert_equal(construct.diags(d, k).toarray(),
construct.diags([d], [k]).toarray())
def test_identity(self):
assert_equal(construct.identity(1).toarray(), [[1]])
assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])
I = construct.identity(3, dtype='int8', format='dia')
assert_equal( I.dtype, np.dtype('int8') )
assert_equal( I.format, 'dia' )
for fmt in sparse_formats:
I = construct.identity( 3, format=fmt )
assert_equal( I.format, fmt )
assert_equal( I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_eye(self):
assert_equal(construct.eye(1,1).toarray(), [[1]])
assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])
assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))
for m in [3, 5]:
for n in [3, 5]:
for k in range(-5,6):
assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))
if m == n:
assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))
def test_eye_one(self):
assert_equal(construct.eye(1).toarray(), [[1]])
assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])
I = construct.eye(3, dtype='int8', format='dia')
assert_equal( I.dtype, np.dtype('int8') )
assert_equal( I.format, 'dia' )
for fmt in sparse_formats:
I = construct.eye( 3, format=fmt )
assert_equal( I.format, fmt )
assert_equal( I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_kron(self):
cases = []
cases.append(array([[ 0]]))
cases.append(array([[-1]]))
cases.append(array([[ 4]]))
cases.append(array([[10]]))
cases.append(array([[0],[0]]))
cases.append(array([[0,0]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14]]))
cases.append(array([[5,4],[0,0],[6,0]]))
cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
cases.append(array([[0,1,0,2,0,5,8]]))
cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
for a in cases:
for b in cases:
result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(a,b)
assert_array_equal(result,expected)
def test_kronsum(self):
cases = []
cases.append(array([[ 0]]))
cases.append(array([[-1]]))
cases.append(array([[ 4]]))
cases.append(array([[10]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
for a in cases:
for b in cases:
result = construct.kronsum(csr_matrix(a),csr_matrix(b)).todense()
expected = np.kron(np.eye(len(b)), a) + \
np.kron(b, np.eye(len(a)))
assert_array_equal(result,expected)
def test_vstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5,6]])
expected = matrix([[1, 2],
[3, 4],
[5, 6]])
assert_equal( construct.vstack( [A,B] ).todense(), expected )
def test_hstack(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
expected = matrix([[1, 2, 5],
[3, 4, 6]])
assert_equal( construct.hstack( [A,B] ).todense(), expected )
def test_bmat(self):
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
expected = matrix([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
assert_equal( construct.bmat( [[A,B],[None,C]] ).todense(), expected )
expected = matrix([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
assert_equal( construct.bmat( [[A,None],[None,C]] ).todense(), expected )
expected = matrix([[0, 5],
[0, 6],
[7, 0]])
assert_equal( construct.bmat( [[None,B],[C,None]] ).todense(), expected )
#TODO test failure cases
def test_block_diag_basic(self):
""" basic test for block_diag """
A = coo_matrix([[1,2],[3,4]])
B = coo_matrix([[5],[6]])
C = coo_matrix([[7]])
expected = matrix([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
assert_equal(construct.block_diag((A, B, C)).todense(), expected)
def test_block_diag_scalar_1d_args(self):
""" block_diag with scalar and 1d arguments """
# one 1d matrix and a scalar
assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
[[2, 3, 0], [0, 0, 4]])
def test_block_diag_1(self):
""" block_diag with one matrix """
assert_equal(construct.block_diag([[1, 0]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1, 0]]]).todense(),
matrix([[1, 0]]))
assert_equal(construct.block_diag([[[1], [0]]]).todense(),
matrix([[1], [0]]))
        # just one scalar
assert_equal(construct.block_diag([1]).todense(),
matrix([[1]]))
def test_rand(self):
# Simple sanity checks for sparse.rand
for t in [np.float32, np.float64, np.longdouble]:
x = sprand(5, 10, density=0.1, dtype=t)
assert_equal(x.dtype, t)
assert_equal(x.shape, (5, 10))
assert_equal(x.nonzero()[0].size, 5)
x = sprand(5, 10, density=0.1)
assert_equal(x.dtype, np.double)
for fmt in ['coo', 'csc', 'csr', 'lil']:
x = sprand(5, 10, format=fmt)
assert_equal(x.format, fmt)
assert_raises(ValueError, lambda: sprand(5, 10, 1.1))
assert_raises(ValueError, lambda: sprand(5, 10, -0.1))
if __name__ == "__main__":
run_module_suite()
|
|
import calendar
import datetime
import os
import sqlite3
from typing import Any, Dict, List, Optional, Tuple, Union
DateTimeType = datetime.datetime
AnyDateType = Union[DateTimeType, datetime.date]
# At runtime these rows behave like Dict[str, Any] (see dict_factory below),
# but the sqlite3 stubs type them as Tuple[Any, ...].
QueryItemType = Tuple[Any, ...]
def get_now() -> DateTimeType:
"""
    Return a datetime object for today's date
    with the time zeroed out.
"""
now = datetime.datetime.now()
now = datetime.datetime(
year=now.year, month=now.month, day=now.day, hour=0, minute=0
)
return now
def dict_factory(cursor: Any, row: Any) -> Dict[Any, Any]:
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
class Events:
"""
    Query results are returned as a dictionary or a list of dictionaries.
:param id: The primary key, integer.
:param name: The name of event, text.
:param start: The start of the event in YYYY-MM-DD HH:MM:SS.
:param end: (optional) The end of the event using timestamp above.
:param note: (optional) Note about event, text.
"""
def __init__(self, database_name: str) -> None:
self.db = database_name
if not os.path.isfile(self.db):
self._create_database()
def __str__(self) -> str:
return self.db
def __repr__(self) -> str:
return f"<Events: {self.db}>"
def __len__(self) -> int:
conn = sqlite3.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute("SELECT COUNT(1) FROM events;")
return cur.fetchone()[0]
def _create_database(self) -> None:
"""Create database with tables."""
conn = sqlite3.connect(self.db)
with conn:
cur = conn.cursor()
cur.executescript(
"""CREATE TABLE events
(id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
start TEXT NOT NULL,
end TEXT NULL,
note TEXT NULL);
CREATE VIRTUAL TABLE search_events USING fts4(
id INTEGER PRIMARY KEY,
content TEXT NOT NULL);
"""
)
def save(
self,
name: str,
start: DateTimeType,
end: Optional[DateTimeType] = None,
note: Optional[str] = None,
) -> None:
"""Add event."""
conn = sqlite3.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute(
"INSERT INTO events VALUES (NULL, ?, ?, ?, ?);",
(name, start, end, note),
)
id = cur.lastrowid
self._save_search(id, name, note)
def _save_search(self, id: int, name: str, note: Optional[str] = None) -> None:
"""Creates or update search table."""
if note:
content = "\n".join((name, note))
else:
content = name
conn = sqlite3.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute(
"""INSERT OR REPLACE INTO search_events (id, content)
VALUES (?, ?);""",
(id, content),
)
def update(self, id: int, values: Dict[str, Any]) -> None:
"""
Update given id
:param id: The pk of event
:param values: A dictionary of the columns you want to change,
            id CANNOT be changed.
"""
if type(values) != dict:
raise TypeError("Values must be a dictionary.")
event = self.get_by_id(id)
for k, v in values.items():
if k in ["name", "start", "end", "note"]:
event[k] = v
conn = sqlite3.connect(self.db)
with conn:
cur = conn.cursor()
cur.execute(
"""UPDATE events
SET name=:name, start=:start, end=:end, note=:note
WHERE id=:id;""",
event,
)
self._save_search(event["id"], event["name"], event["note"])
def get_all(self) -> List[QueryItemType]:
"""Get all events."""
conn = sqlite3.connect(self.db)
with conn:
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute(
"""SELECT *
FROM events
ORDER BY datetime(start)
DESC;"""
)
query = cur.fetchall()
return query
def get_by_id(self, id: int) -> Any:
"""Get by id"""
conn = sqlite3.connect(self.db)
with conn:
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute("SELECT * FROM events WHERE id=? ;", (id,))
query = cur.fetchone()
return query
def search(self, term: str) -> List[QueryItemType]:
"""
Search events.
See http://www.sqlite.org/fts3.html#section_3 for query options.
"""
conn = sqlite3.connect(self.db)
with conn:
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute(
"""SELECT *
FROM search_events
JOIN events ON search_events.id = events.id
WHERE search_events MATCH ?;""",
(term,),
)
query = cur.fetchall()
return query
def _query_between_date(
self, start: AnyDateType, end: AnyDateType
) -> List[QueryItemType]:
"""
Query between two days.
Return list
"""
conn = sqlite3.connect(self.db)
with conn:
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute(
"""SELECT * FROM events WHERE
date(start)
BETWEEN date(?) AND date(?) OR
date(end) >= date(?)
ORDER BY datetime(start)
DESC;""",
(start, end, start),
)
query = cur.fetchall()
return query
def get_today(self) -> List[QueryItemType]:
"""Get today's events."""
today = get_now()
tomorrow = today + datetime.timedelta(days=1)
return self._query_between_date(today, tomorrow)
def get_week(self) -> List[QueryItemType]:
"""Get this week's events."""
today = get_now()
begin = 0 - today.weekday()
end = 6 - today.weekday()
wk_begin = today + datetime.timedelta(days=begin)
wk_end = today + datetime.timedelta(days=end)
return self._query_between_date(wk_begin, wk_end)
def get_month(self) -> List[QueryItemType]:
"""Get this month's events."""
today = get_now()
int_last_day_month = calendar.monthrange(today.year, today.month)[1]
last_day_month = datetime.date(today.year, today.month, int_last_day_month)
start_day_month = datetime.date(today.year, today.month, 1)
return self._query_between_date(start_day_month, last_day_month)
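def _example_events_usage(path="events_example.db"):
    # A minimal sketch (not part of the original module): create (or open) a
    # database, add an event, and read today's events back. The file name is
    # hypothetical.
    events = Events(path)
    events.save("standup", get_now())
    return events.get_today()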
|
|
from __future__ import unicode_literals
from django.test import TestCase
from django.core.exceptions import FieldError
from .models import (
Domain, Kingdom, Phylum, Klass, Order, Family, Genus, Species, HybridSpecies,
Pizza, TaggedItem, Bookmark,
)
class SelectRelatedTests(TestCase):
@classmethod
def create_tree(cls, stringtree):
"""
Helper to create a complete tree.
"""
names = stringtree.split()
models = [Domain, Kingdom, Phylum, Klass, Order, Family, Genus, Species]
assert len(names) == len(models), (names, models)
parent = None
for name, model in zip(names, models):
try:
obj = model.objects.get(name=name)
except model.DoesNotExist:
obj = model(name=name)
if parent:
setattr(obj, parent.__class__.__name__.lower(), parent)
obj.save()
parent = obj
@classmethod
def setUpTestData(cls):
cls.create_tree("Eukaryota Animalia Anthropoda Insecta Diptera Drosophilidae Drosophila melanogaster")
cls.create_tree("Eukaryota Animalia Chordata Mammalia Primates Hominidae Homo sapiens")
cls.create_tree("Eukaryota Plantae Magnoliophyta Magnoliopsida Fabales Fabaceae Pisum sativum")
cls.create_tree("Eukaryota Fungi Basidiomycota Homobasidiomycatae Agaricales Amanitacae Amanita muscaria")
def test_access_fks_without_select_related(self):
"""
Normally, accessing FKs doesn't fill in related objects
"""
with self.assertNumQueries(8):
fly = Species.objects.get(name="melanogaster")
domain = fly.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, 'Eukaryota')
def test_access_fks_with_select_related(self):
"""
A select_related() call will fill in those related objects without any
extra queries
"""
with self.assertNumQueries(1):
person = Species.objects.select_related('genus__family__order__klass__phylum__kingdom__domain').get(name="sapiens")
domain = person.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, 'Eukaryota')
def test_list_without_select_related(self):
"""
select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior without select_related.
"""
with self.assertNumQueries(9):
world = Species.objects.all()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [
'Amanitacae',
'Drosophilidae',
'Fabaceae',
'Hominidae',
])
def test_list_with_select_related(self):
"""
select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior with select_related.
"""
with self.assertNumQueries(1):
world = Species.objects.all().select_related()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [
'Amanitacae',
'Drosophilidae',
'Fabaceae',
'Hominidae',
])
def test_list_with_depth(self):
"""
Passing a relationship field lookup specifier to select_related() will
stop the descent at a particular level. This can be used on lists as
well.
"""
with self.assertNumQueries(5):
world = Species.objects.all().select_related('genus__family')
orders = [o.genus.family.order.name for o in world]
self.assertEqual(sorted(orders),
['Agaricales', 'Diptera', 'Fabales', 'Primates'])
def test_select_related_with_extra(self):
s = (Species.objects.all()
.select_related()
.extra(select={'a': 'select_related_species.id + 10'})[0])
self.assertEqual(s.id + 10, s.a)
def test_certain_fields(self):
"""
The optional fields passed to select_related() control which related
models we pull in. This allows for smaller queries.
In this case, we explicitly say to select the 'genus' and
'genus.family' models, leading to the same number of queries as before.
"""
with self.assertNumQueries(1):
world = Species.objects.select_related('genus__family')
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families),
['Amanitacae', 'Drosophilidae', 'Fabaceae', 'Hominidae'])
def test_more_certain_fields(self):
"""
In this case, we explicitly say to select the 'genus' and
'genus.family' models, leading to the same number of queries as before.
"""
with self.assertNumQueries(2):
world = Species.objects.filter(genus__name='Amanita')\
.select_related('genus__family')
orders = [o.genus.family.order.name for o in world]
self.assertEqual(orders, ['Agaricales'])
def test_single_related_field(self):
with self.assertNumQueries(1):
species = Species.objects.select_related('genus__name')
names = [s.genus.name for s in species]
self.assertEqual(sorted(names), ['Amanita', 'Drosophila', 'Homo', 'Pisum'])
def test_field_traversal(self):
with self.assertNumQueries(1):
s = (Species.objects.all()
.select_related('genus__family__order')
.order_by('id')[0:1].get().genus.family.order.name)
self.assertEqual(s, 'Diptera')
def test_depth_fields_fails(self):
self.assertRaises(
TypeError,
Species.objects.select_related,
'genus__family__order', depth=4
)
def test_none_clears_list(self):
queryset = Species.objects.select_related('genus').select_related(None)
self.assertEqual(queryset.query.select_related, False)
def test_chaining(self):
parent_1, parent_2 = Species.objects.all()[:2]
HybridSpecies.objects.create(name='hybrid', parent_1=parent_1, parent_2=parent_2)
queryset = HybridSpecies.objects.select_related('parent_1').select_related('parent_2')
with self.assertNumQueries(1):
obj = queryset[0]
self.assertEqual(obj.parent_1, parent_1)
self.assertEqual(obj.parent_2, parent_2)
class SelectRelatedValidationTests(TestCase):
"""
    select_related() should throw an error on fields that do not exist and
non-relational fields.
"""
non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s"
invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s"
def test_non_relational_field(self):
with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', 'genus')):
list(Species.objects.select_related('name__some_field'))
with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', 'genus')):
list(Species.objects.select_related('name'))
with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', '(none)')):
list(Domain.objects.select_related('name'))
def test_many_to_many_field(self):
with self.assertRaisesMessage(FieldError, self.invalid_error % ('toppings', '(none)')):
list(Pizza.objects.select_related('toppings'))
def test_reverse_relational_field(self):
with self.assertRaisesMessage(FieldError, self.invalid_error % ('child_1', 'genus')):
list(Species.objects.select_related('child_1'))
def test_invalid_field(self):
with self.assertRaisesMessage(FieldError, self.invalid_error % ('invalid_field', 'genus')):
list(Species.objects.select_related('invalid_field'))
with self.assertRaisesMessage(FieldError, self.invalid_error % ('related_invalid_field', 'family')):
list(Species.objects.select_related('genus__related_invalid_field'))
with self.assertRaisesMessage(FieldError, self.invalid_error % ('invalid_field', '(none)')):
list(Domain.objects.select_related('invalid_field'))
def test_generic_relations(self):
with self.assertRaisesMessage(FieldError, self.invalid_error % ('tags', '')):
list(Bookmark.objects.select_related('tags'))
with self.assertRaisesMessage(FieldError, self.invalid_error % ('content_object', 'content_type')):
list(TaggedItem.objects.select_related('content_object'))
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
base.py
common base for the commands execution framework. Units of work are defined as Operations
as found in other modules like unix.py. These units of work are then packaged up and executed
within a GpCommand. A GpCommand is just a common infrastructure for executing an Operation.
The general idea is that the application developer breaks the problem down into a set of
GpCommands that need to be executed. This class also provides a queue and set of workers
for executing this set of commands.
'''
from Queue import Queue,Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pygresql.pg import DB
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import getpass
logger=gplog.get_default_logger()
GPHOME=os.environ.get('GPHOME')
SRC_GPPATH=". %s/greenplum_path.sh;" % GPHOME
# Maximum retries if sshd rejects the connection due to too many
# unauthenticated connections.
SSH_MAX_RETRY=10
# Delay before retrying ssh connection, in seconds
SSH_RETRY_DELAY=.5
class WorkerPool(object):
"""TODO:"""
def __init__(self,numWorkers=16,items=None):
self.workers=[]
self.work_queue=Queue()
self.completed_queue=Queue()
self.num_assigned=0
if items is not None:
for item in items:
self.work_queue.put(item)
self.num_assigned += 1
for i in range(0,numWorkers):
w = Worker("worker%d" % i,self)
self.workers.append(w)
w.start()
self.numWorkers = numWorkers
self.logger = logger
###
def getNumWorkers(self):
return self.numWorkers
def getNextWorkItem(self,timeout=None):
return self.work_queue.get(block=True,timeout=timeout)
def addFinishedWorkItem(self,command):
self.completed_queue.put(command)
self.work_queue.task_done()
def addCommand(self,cmd):
self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
self.work_queue.put(cmd)
self.num_assigned += 1
def wait_and_printdots(self,command_count,quiet=True):
while self.completed_queue.qsize() < command_count:
time.sleep(1)
if not quiet:
sys.stdout.write(".")
sys.stdout.flush()
if not quiet:
print " "
self.join()
def join(self):
self.work_queue.join()
return True
def joinWorkers(self):
for w in self.workers:
w.join()
def getCompletedItems(self):
completedList=[]
try:
while True:
item=self.completed_queue.get(False)
if item is not None:
completedList.append(item)
except Empty:
return completedList
return completedList #just to be sure
def check_results(self):
""" goes through all items in the completed_queue and throws an exception at the
        first one that did not execute successfully.
        Raises ExecutionError.
"""
try:
while True:
item=self.completed_queue.get(False)
if not item.get_results().wasSuccessful():
raise ExecutionError("Error Executing Command: ",item)
except Empty:
return
def empty_completed_items(self):
while not self.completed_queue.empty():
self.completed_queue.get(False)
def isDone(self):
#TODO: not sure that qsize() is safe
return (self.num_assigned == self.completed_queue.qsize())
def haltWork(self):
self.logger.debug("WorkerPool haltWork()")
for w in self.workers:
w.haltWork()
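# Illustrative sketch (not part of the original module): the driver pattern the
# module docstring describes -- queue Commands, wait for them, then verify.
# Command and REMOTE are defined further down in this module; the host names
# and command string are hypothetical.
def _example_pool_usage():
    pool = WorkerPool(numWorkers=4)
    for host in ['sdw1', 'sdw2']:
        pool.addCommand(Command('check hostname', 'hostname', ctxt=REMOTE, remoteHost=host))
    pool.join()
    pool.check_results()
    pool.haltWork()
    pool.joinWorkers()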
class OperationWorkerPool(WorkerPool):
""" TODO: This is a hack! In reality, the WorkerPool should work with Operations, and
Command should be a subclass of Operation. Till then, we'll spoof the necessary Command
functionality within Operation. """
def __init__(self, numWorkers=16, operations=None):
if operations is not None:
for operation in operations:
self._spoof_operation(operation)
super(OperationWorkerPool, self).__init__(numWorkers, operations)
def check_results(self):
raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
def _spoof_operation(self, operation):
operation.cmdStr = str(operation)
class Worker(Thread):
"""TODO:"""
pool=None
shouldStop=False
cmd=None
name=None
logger=None
def __init__(self,name,pool,timeout=0.1):
self.name=name
self.pool=pool
self.timeout=timeout
self.logger=logger
Thread.__init__(self)
def run(self):
try_count = 0
while True:
try:
if try_count == 5:
self.logger.debug("[%s] try and get work from queue..." % self.name)
try_count = 0
if self.shouldStop:
self.logger.debug('[%s] stopping' % self.name)
return
try:
self.cmd = self.pool.getNextWorkItem(timeout=self.timeout)
except TypeError:
# misleading exception raised during interpreter shutdown
return
if self.cmd is not None and not self.shouldStop:
self.logger.debug("[%s] got cmd: %s" % (self.name,self.cmd.cmdStr))
self.cmd.run()
self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
try_count = 0
else:
try_count += 1
if self.shouldStop:
self.logger.debug("[%s] stopping" % self.name)
return
except Empty:
if self.shouldStop:
self.logger.debug("[%s] stopping" % self.name)
return
except Exception,e:
self.logger.exception(e)
if self.cmd:
self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
try_count = 0
def haltWork(self):
self.logger.debug("[%s] haltWork" % self.name)
self.shouldStop=True
# this was originally coded as
#
# if self.cmd is not None:
# self.cmd.interrupt()
# self.cmd.cancel()
#
# but as observed in MPP-13808, the worker thread's run() loop may set self.cmd to None
# past the point where the calling thread checks self.cmd for None, leading to a curious
# "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
# haltWorkers() from actually halting all the workers.
#
c = self.cmd
if c is not None:
c.interrupt()
c.cancel()
def signalPassiveStop(self):
self.shouldStop=True
"""
TODO: consider just having a single interface that needs to be implemented for
describing work to allow the Workers to use it. This would allow the user
to better provide logic necessary. i.e. even though the user wants to
execute a unix command... how the results are interpreted is highly
application specific. So we should have a separate level of abstraction
for executing UnixCommands and DatabaseCommands from this one.
other things to think about:
-- how to support cancel
-- how to support progress
-- undo?
-- blocking vs. unblocking
"""
#--------------------------------NEW WORLD-----------------------------------
class CommandResult():
""" Used as a way to package up the results from a GpCommand
"""
#rc,stdout,stderr,completed,halt
def __init__(self,rc,stdout,stderr,completed,halt):
self.rc=rc
self.stdout=stdout
self.stderr=stderr
self.completed=completed
self.halt=halt
pass
def printResult(self):
res = "cmd had rc=%d completed=%s halted=%s\n stdout='%s'\n " \
"stderr='%s'" % (self.rc,str(self.completed), str(self.halt), self.stdout, self.stderr)
return res
def wasSuccessful(self):
if self.halt:
return False
if not self.completed:
return False
if self.rc != 0:
return False
return True
def __str__(self):
return self.printResult()
def split_stdout(self, how=':'):
"""
TODO: AK: This doesn't belong here if it pertains only to pg_controldata.
MPP-16318: Skip over discrepancies in the pg_controldata stdout, as it's
not this code's responsibility to judge the pg_controldata stdout. This is
especially true for 'immediate' shutdown, in which case, we won't even
care for WARNINGs or other pg_controldata discrepancies.
"""
for line in self.stdout.split('\n'):
ret = line.split(how, 1)
if len(ret) == 2:
yield ret
class ExecutionError(Exception):
def __init__(self,summary,cmd):
self.summary=summary
self.cmd=cmd
def __str__(self):
#TODO: improve dumping of self.cmd
return "ExecutionError: '%s' occured. Details: '%s' %s" %\
(self.summary,self.cmd.cmdStr,self.cmd.get_results().printResult())
#specify types of execution contexts.
LOCAL=1
REMOTE=2
RMI=3
NAKED=4
gExecutionContextFactory = None
#
# @param factory needs to have a createExecutionContext(self, execution_context_id, remoteHost, stdin, nakedExecutionInfo) function
#
def setExecutionContextFactory(factory):
global gExecutionContextFactory
gExecutionContextFactory = factory
def createExecutionContext(execution_context_id,remoteHost,stdin, nakedExecutionInfo=None):
if gExecutionContextFactory is not None:
return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
elif execution_context_id == LOCAL:
return LocalExecutionContext(stdin)
elif execution_context_id == REMOTE:
if remoteHost is None:
raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
return RemoteExecutionContext(remoteHost,stdin)
elif execution_context_id == RMI:
return RMIExecutionContext()
elif execution_context_id == NAKED:
if remoteHost is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a remoteHost")
if nakedExecutionInfo is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a NakedExecutionInfo")
return NakedExecutionContext(remoteHost, stdin, nakedExecutionInfo)
class ExecutionContext():
""" An ExecutionContext defines where and how to execute the Command and how to
gather up information that are the results of the command.
"""
propagate_env_map = {}
"""
Dict. mapping environment variables to their values. See gpcoverage.py for example usage.
"""
def __init__(self):
pass
def execute(self,cmd):
pass
def interrupt(self,cmd):
pass
def cancel(self,cmd):
pass
class LocalExecutionContext(ExecutionContext):
proc=None
halt=False
completed=False
def __init__(self,stdin):
ExecutionContext.__init__(self)
self.stdin = stdin
pass
def execute(self,cmd):
        # prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# executable='/bin/bash' is to ensure the shell is bash. bash isn't the
# actual command executed, but the shell that command string runs under.
self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
executable='/bin/bash',
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
(rc,stdout_value,stderr_value)=self.proc.communicate2(input=self.stdin)
self.completed=True
cmd.set_results(CommandResult(rc,"".join(stdout_value),"".join(stderr_value),self.completed,self.halt))
def cancel(self,cmd):
if self.proc:
try:
os.kill(self.proc.pid, signal.SIGTERM)
except OSError:
pass
def interrupt(self,cmd):
self.halt=True
if self.proc:
self.proc.cancel()
##########################################################################
# Naked Execution is used to run commands where ssh keys are not exchanged
class NakedExecutionInfo:
SFTP_NONE = 0
SFTP_PUT = 1
SFTP_GET = 2
def __init__(self, passwordMap, sftp_operation = SFTP_NONE, sftp_remote = None, sftp_local = None):
self.passwordMap = passwordMap
self.sftp_operation = sftp_operation
self.sftp_remote = sftp_remote
self.sftp_local = sftp_local
class RemoteExecutionContext(LocalExecutionContext):
trail = set()
"""
Leaves a trail of hosts to which we've ssh'ed, during the life of a particular interpreter.
"""
def __init__(self,targetHost,stdin):
LocalExecutionContext.__init__(self, stdin)
self.targetHost=targetHost
pass
def execute(self,cmd):
        # prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
self.__class__.trail.add(self.targetHost)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# Escape " for remote execution otherwise it interferes with ssh
cmd.cmdStr = cmd.cmdStr.replace('"', '\\"')
cmd.cmdStr="ssh -o 'StrictHostKeyChecking no' %s \"%s %s\"" % (self.targetHost,SRC_GPPATH,cmd.cmdStr)
LocalExecutionContext.execute(self,cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd)
pass
def __retry(self, cmd, count=0):
if count == SSH_MAX_RETRY:
return
time.sleep(SSH_RETRY_DELAY)
LocalExecutionContext.execute(self, cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd, count + 1)
class RMIExecutionContext(ExecutionContext):
""" Leave this as a big old TODO: for now. see agent.py for some more details"""
def __init__(self):
ExecutionContext.__init__(self)
raise Exception("RMIExecutionContext - Not implemented")
pass
class Command:
""" TODO:
"""
name=None
cmdStr=None
results=None
exec_context=None
propagate_env_map={} # specific environment variables for this command instance
def __init__(self,name,cmdStr,ctxt=LOCAL,remoteHost=None,stdin=None,nakedExecutionInfo=None):
self.name=name
self.cmdStr=cmdStr
self.exec_context=createExecutionContext(ctxt,remoteHost,stdin=stdin,nakedExecutionInfo=nakedExecutionInfo)
self.remoteHost=remoteHost
def __str__(self):
if self.results:
return "%s cmdStr='%s' had result: %s" % (self.name,self.cmdStr,self.results)
else:
return "%s cmdStr='%s'" % (self.name,self.cmdStr)
def run(self,validateAfter=False):
faultPoint = os.getenv('GP_COMMAND_FAULT_POINT')
if not faultPoint or (self.name and not self.name.startswith(faultPoint)):
self.exec_context.execute(self)
else:
# simulate error
self.results = CommandResult(1,'Fault Injection','Fault Injection' ,False,True)
if validateAfter:
self.validate()
pass
def set_results(self,results):
self.results=results
def get_results(self):
return self.results
def get_stdout_lines(self):
return self.results.stdout.splitlines()
def get_stderr_lines(self):
return self.results.stderr.splitlines()
def cancel(self):
self.exec_context.cancel(self)
def interrupt(self):
self.exec_context.interrupt(self)
def was_successful(self):
if self.results is None:
return False
else:
return self.results.wasSuccessful()
def validate(self,expected_rc=0):
"""Plain vanilla validation which expects a 0 return code."""
if self.results.rc != expected_rc:
raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
"""Base class for commands that execute SQL statements. Classes
    that inherit from SQLCommand should set cancel_conn to the pygresql
connection they wish to cancel and check self.cancel_flag."""
def __init__(self,name):
Command.__init__(self, name, cmdStr=None)
self.cancel_flag = False
self.cancel_conn = None
def run(self,validateAfter=False):
raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
def interrupt(self):
# No execution context for SQLCommands
pass
def cancel(self):
# assignment is an atomic operation in python
self.cancel_flag = True
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
DB(self.cancel_conn).cancel()
def run_remote_commands(name, commands):
"""
"""
cmds = {}
pool = WorkerPool()
for host, cmdStr in commands.items():
cmd = Command(name=name, cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
cmds[host] = cmd
pool.join()
pool.check_results()
return cmds
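# --- Illustrative sketch (editor addition, not part of the original module):
# fanning a different command string out to each host with
# run_remote_commands(). Host names are placeholders.
def _example_gather_host_info():
    commands = {'host-a': 'df -h /data', 'host-b': 'uptime'}
    completed = run_remote_commands('gather host info', commands)
    # completed maps host name -> finished Command; stdout holds the output
    return dict((host, cmd.get_results().stdout.strip())
                for host, cmd in completed.items())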
|
|
"""Qt widgets to access a :py:class:`Register`.
"""
# standard imports
from typing import Union
import logging
# Qt imports
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QKeyEvent, QColor, QPalette
from PyQt5.QtWidgets import (QWidget, QPushButton, QLabel, QRadioButton,
QHBoxLayout, QVBoxLayout, QListWidgetItem,
QScrollArea, QSizePolicy)
# toolbox imports
from dltb.base.register import Register, RegisterEntry, Registrable
from dltb.base.register import RegisterClass
from dltb.base.register import ClassRegisterEntry, InstanceRegisterEntry
from toolbox import Toolbox
# GUI imports
from ..utils import QObserver, QDebug, QPrepareButton, protect
from ..adapter import ItemAdapter, QAdaptedListWidget, QAdaptedComboBox
# logging
LOG = logging.getLogger(__name__)
class RegisterAdapter(QObserver, ItemAdapter, qobservables={
Register: {'entry_added', 'entry_removed', 'entry_changed'}
}, itemToText=lambda entry: entry.key):
# pylint: disable=abstract-method
"""A list of register entries. Register entries are objects
registered in a :py:class:`Register`, implementing the
    :py:class:`Registrable` interface. Those objects have a
    :py:attr:`key` property providing a unique string that can be
    used to refer to the object in the register.
The :py:class:`RegisterAdapter` can observe a
:py:class:`Register` and react to changes by adding
or removing entries.
"""
def __init__(self, register: Register = None, **kwargs) -> None:
"""Initialization of the :py:class:`RegisterAdapter`.
Parameters
----------
"""
super().__init__(**kwargs)
self._onlyInitialized = False
self.setRegister(register)
def setRegister(self, register: Register) -> None:
"""Set a new :py:class:`Register` for this
:py:class:`RegisterAdapter`. The entries of this list
will be updated from the register.
Arguments
---------
register: Register
The :py:class:`Register` from which the list
will be updated. If `None` the list will be cleared.
"""
self.setFromIterable(iter(()) if register is None else register)
def _updateFromRegister(self) -> None:
"""Update this :py:class:`RegisterAdapter` to reflect the
        current state of the register, taking the display flag
        `onlyInitialized` into account.
"""
self.updateFromIterable(iter(()) if self._register is None else
self._register)
def register_changed(self, register: Register,
change: Register.Change, key: str = None) -> None:
# pylint: disable=invalid-name
"""Called upon a change in the :py:class:`Register`.
Arguments
---------
register: Register
The :py:class:`Register` that was changed.
change: Register.Change
key: str
The key that was changed.
"""
LOG.info("%s.register_changed: %s [%r], key=%s",
type(self).__name__, type(register).__name__, change, key)
if key is None:
# FIXME[concept]: key may be None, if the notification is
            # received upon showing the widget after it was hidden.
# This means that we can in fact not rely on key having a
# meaningful value in the GUI - if we want to change this,
# we would have to make the notification upon show more
# sophisticated!
self._updateFromRegister()
return
if change.entry_added:
self._addItem(register[key])
if change.entry_changed:
self._formatItem(register[key])
if change.entry_removed:
self._removeText(key)
#
# Filter methods for class/instance registers
#
def onlyInitialized(self) -> bool:
"""A flag indicating if only initialized entries (`True`)
        or all entries (`False`) are listed.
"""
return self._onlyInitialized
def setOnlyInitialized(self, onlyInitialized: bool = True) -> None:
"""Specify if only initialized entries (`True`) or all entry (`False`)
shall be listed.
"""
if onlyInitialized != self._onlyInitialized:
self._onlyInitialized = onlyInitialized
self._updateFromRegister()
#
# FIXME[old]: old methods
#
@protect
def keyPressEvent(self, event: QKeyEvent) -> None:
"""Process key events. The :py:class:`RegisterAdapter` supports
the following keys:
I: toggle the `onlyInitialized` flag
U: update display of the entries of this :py:class:`RegisterAdapter`
Note: in a QComboBox this event is only received if the combobox
is closed (not while currently selecting an entry).
"""
key = event.key()
LOG.debug("RegisterAdapter.keyPressEvent: key=%d", key)
if key == Qt.Key_U: # update
LOG.info("Updateting from register: %s",
self._register and type(self._register).__name__)
self._updateFromRegister()
LOG.debug("Updated RegisterAdapter: %s",
self._register and list(self._register.keys()))
elif key == Qt.Key_I: # toggle onlyInitialized
self.setOnlyInitialized(not self.onlyInitialized())
else:
super().keyPressEvent(event)
def currentEntry(self) -> Registrable:
"""Get the currently selected entry.
This may be `None` if no entry is selected.
"""
return self._currentItem()
def setCurrentEntry(self, entry: Registrable) -> None:
"""Select the given entry in this :py:class:`RegisterAdapter`.
Arguments
---------
entry: Registrable
The entry to become the currently selected entry
in this list. `None` will deselect the current element.
Raises
------
ValueError:
The given entry is not an element of this
:py:class:`RegisterAdapter`.
"""
self._setCurrentItem(entry)
def debug(self) -> None:
"""Output debug information.
"""
super().debug()
print(f"debug: RegisterAdapter[{type(self).__name__}]:")
# print(f"debug: * register: {self.register()}")
print(f"debug: * register: {self._register}")
print(f"debug: * onlyInitialized: {self.onlyInitialized()}")
if self._register is not None:
print(f"debug: * register entries:")
for entry in self._register:
print(f"debug: {'+' if not entry.initialized else '-'} "
f"{entry.key} [{repr(entry)}]")
class ToolboxAdapter(RegisterAdapter, qobservables={Toolbox: set()}):
    """A :py:class:`RegisterAdapter` that can alternatively be filled
    from a :py:class:`Toolbox` instead of the observed register.
    """
    def __init__(self, toolbox: Toolbox = None, **kwargs) -> None:
        """
        Arguments
        ---------
        toolbox: Toolbox
            The initial :py:class:`Toolbox`. May be `None`.
        """
super().__init__(**kwargs)
self.setToolbox(toolbox)
def setToolbox(self, toolbox: Toolbox) -> None:
"""Set a :py:class:`Toolbox`. If a toolbox is set, the list
will be filled from that toolbox, no longer from the register.
"""
if toolbox is None:
self.observe(self._register)
self._updateFromRegister()
else:
self.unobserve(self._register)
self.updateFromToolbox()
def updateFromToolbox(self) -> None:
"""Update the list from the :py:class:`Toolbox`.
"""
raise NotImplementedError("A ToolboxAdapter should implement "
"'updateFromToolbox'")
def debug(self) -> None:
"""Output debug information for this :py:class:`ToolboxAdapter`.
"""
        super_debug = getattr(super(), 'debug', None)
if super_debug is not None:
super_debug()
print(f"debug: ToolboxAdapter[{type(self).__name__}]: "
f"Toolbox={self.toolbox()}")
toolbox = self.toolbox()
if toolbox is not None:
for index, datasource in enumerate(toolbox.datasources):
print("debug: "
f"{'**' if datasource is toolbox.datasource else ' '}"
f" ({index}) {datasource} [{type(datasource)}]")
class QRegisterListWidget(QAdaptedListWidget, RegisterAdapter):
"""A :py:class:`QListWidget` for selecting entries from a
:py:class:`Register`.
"""
class QRegisterComboBox(QAdaptedComboBox, RegisterAdapter):
"""A :py:class:`QComboBox` for selecting entries from a
:py:class:`Register`.
"""
# #############################################################################
#
# Class register
#
class QRegisterClassView(QWidget):
"""A widget for viewing a :py:class:`RegisterClass`.
This is essentially just a :py:class:`QRegisterListWidget` that
can be used in two modes: either list the `class_register` or
the `instance_register` of the :py:class:`RegisterClass`.
The view can be changed between these two options by setting
the mode (:py:meth:`setMode`) to either `class` or `instance`.
    There are also some radio buttons included that allow selecting
    the mode.
"""
instanceSelected: pyqtSignal = pyqtSignal(object)
classSelected: pyqtSignal = pyqtSignal(type)
colorInitialized: QColor = QColor(Qt.white).lighter()
colorInitializable: QColor = QColor(Qt.blue).lighter()
colorUninitializable: QColor = QColor(Qt.red).lighter()
def __init__(self, registerClass: RegisterClass = None, **kwargs) -> None:
"""
"""
super().__init__(**kwargs)
self._registerClass = None
self._mode = None # 'class' or 'instance'
self._initUI()
self._initLayout()
self._listWidget.setListWidgetItemFormater(self._formatListWidgetItem)
self.setRegisterClass(registerClass)
def _initUI(self) -> None:
# pylint: disable=attribute-defined-outside-init
"""Initialize the user interface.
"""
self._listWidget = QRegisterListWidget()
self._listWidget.currentItemChanged.connect(self._onCurrentItemChanged)
self._instanceButton = QRadioButton('Instances')
self._instanceButton.clicked.connect(self._onRadioButtonClicked)
self._classButton = QRadioButton('classes')
self._classButton.clicked.connect(self._onRadioButtonClicked)
def _initLayout(self) -> None:
layout = QVBoxLayout()
layout.addWidget(self._listWidget)
row = QHBoxLayout()
row.addWidget(self._classButton)
row.addWidget(self._instanceButton)
layout.addLayout(row)
self.setLayout(layout)
def registerClass(self) -> RegisterClass:
"""The :py:class:`RegisterClass` currently viewed.
"""
return self._registerClass
def setRegisterClass(self, registerClass: RegisterClass) -> None:
"""Set the :py:class:`RegisterClass` to be viewed.
"""
self._registerClass = registerClass
self.update()
def mode(self) -> str:
"""The mode of this :py:class:`QRegisterClassView` (either
`class` or `instance`).
"""
return self._mode
def setMode(self, mode: str) -> None:
"""The mode of this :py:class:`QRegisterClassView` (either
`class` or `instance`).
"""
if mode == self._mode:
return # nothing changed
self._mode = mode
self.update()
if self._mode is None:
raise ValueError("Invalide mode for QRegisterClassView: '{mode}'")
def setClass(self, cls: type) -> None:
"""Set the currently selected class entry. This will switch
the view into `'class'` mode.
"""
self.setMode('class')
registerEntry = cls and self._registerClass.class_register[cls]
self._listWidget.setCurrentEntry(registerEntry)
def setInstance(self, instance: object) -> None:
"""Set the currently selected instance entry. This will switch
the view into `'instance'` mode.
"""
self.setMode('instance')
registerEntry = (instance and
self._registerClass.instance_register[instance.key])
self._listWidget.setCurrentEntry(registerEntry)
def update(self) -> None:
"""Update the display elements.
"""
mode = self._mode
registerClass = self._registerClass
register = None
if mode == 'class':
self._classButton.setChecked(True)
if registerClass is not None:
register = registerClass.class_register
elif mode == 'instance':
self._instanceButton.setChecked(True)
if registerClass is not None:
register = registerClass.instance_register
else:
self._mode = None
self._classButton.setChecked(False)
self._instanceButton.setChecked(False)
self._listWidget.setRegister(register)
super().update()
@protect
def _onCurrentItemChanged(self, current: QListWidgetItem,
_previous: QListWidgetItem) -> None:
"""React to the selection of an item in this list.
"""
entry = None if current is None else current.data(Qt.UserRole)
if isinstance(entry, InstanceRegisterEntry):
self.instanceSelected.emit(entry)
elif isinstance(entry, ClassRegisterEntry):
self.classSelected.emit(entry) # FIXME[bug]: TypeError
# QRegisterClassView.classSelected[type].emit():
# argument 1 has unexpected type 'ClassRegisterEntry'
@protect
def _onRadioButtonClicked(self, _checked: bool) -> None:
"""React to a mode selection by the radio buttons.
"""
self.setMode('class' if self._classButton.isChecked() else 'instance')
def _formatListWidgetItem(self, item: QListWidgetItem) -> None:
"""Format a :py:class:`QListWidgetItem`.
Arguments
---------
item: QListWidgetItem
The :py:class:`QListWidgetItem` to format. It is guaranteed
            that the associated data is of type
:py:class:`ClassRegisterEntry`.
"""
entry = item.data(Qt.UserRole)
if entry.initialized:
color = self.colorInitialized
elif entry.initializable:
color = self.colorInitializable
else:
color = self.colorUninitializable
        if self._mode in ('class', 'instance'):
            item.setBackground(color)
#
# Controller
#
class QRegisterClassEntryController(QWidget, QObserver, qobservables={
Register: {'entry_changed'}}, qattributes={
RegisterEntry: True}):
"""A controller for entries of a :py:class:`RegisterClass`. This
may be subclassed to either control :py:class:`ClassRegisterEntry`
entries or :py:class:`InstanceRegisterEntry`. There are two
subclasses, :py:class:`QClassRegisterEntryController` and
    :py:class:`QInstanceRegisterEntryController` to control these
specific types of entries.
The class register entries themselves are not observable, but
observing the corresponding :py:class:`Register` (either
:py:attr:`RegisterClass.class_register` or
:py:attr:`RegisterClass.instance_register`) allows us to get
informed when the status of the class has changed via the
`entry_changed` notification.
"""
def __init_subclass__(cls, register: RegisterClass = None,
**kwargs) -> None:
# pylint: disable=arguments-differ
"""
"""
super().__init_subclass__(**kwargs)
if register is not None:
cls.register = register
def __init__(self, **kwargs) -> None:
"""Initialization of the :py:class:`QRegisterClassEntryController`.
"""
super().__init__(**kwargs)
self._name = None # the class name
self._description = None # FIXME[question]: what is this?
self._initUI()
self._layoutUI()
def _initUI(self) -> None:
# pylint: disable=attribute-defined-outside-init
"""Initialize the user interface for this
:py:class:`QRegisterClassEntryController`.
"""
if not hasattr(self, '_button'):
self._button = QPushButton()
self._button.setEnabled(False)
self._button.clicked.connect(self._onButtonClicked)
self._keyLabel = QLabel()
self._stateLabel = QLabel()
self._errorLabel = QLabel()
self._errorLabel.setWordWrap(True)
# FIXME[todo]: find the best way (portable, theme aware, ...)
# to set QLabel style and apply this globally (to all QLabels
# in the user interface)
# self._errorLabel.setStyleSheet("QLabel { color : red; }")
palette = self._errorLabel.palette()
palette.setColor(self._errorLabel.foregroundRole(), Qt.red)
self._errorLabel.setPalette(palette)
self._descriptionLabel = QLabel()
self._descriptionLabel.setWordWrap(True)
self._descriptionLabel.setBackgroundRole(QPalette.Base)
# self._descriptionLabel.setSizePolicy(QSizePolicy.Ignored,
# QSizePolicy.Ignored)
# self._descriptionLabel.setScaledContents(True)
self._scrollArea = QScrollArea()
self._scrollArea.setBackgroundRole(QPalette.Dark)
self._scrollArea.setWidget(self._descriptionLabel)
self._scrollArea.setWidgetResizable(True)
def _layoutUI(self) -> None:
"""Layout the user interface for this
:py:class:`QRegisterClassEntryController`.
"""
layout = QVBoxLayout()
rows = QVBoxLayout()
row = QHBoxLayout()
row.addWidget(QLabel('Key: '))
row.addWidget(self._keyLabel)
row.addStretch()
rows.addLayout(row)
row = QHBoxLayout()
row.addWidget(QLabel('State: '))
row.addWidget(self._stateLabel)
row.addStretch()
rows.addLayout(row)
row = QHBoxLayout()
row.addLayout(rows)
row.addStretch()
row.addWidget(self._button)
layout.addLayout(row)
layout.addWidget(self._errorLabel)
layout.addStretch()
# layout.addWidget(self._descriptionLabel)
layout.addWidget(self._scrollArea)
self.setLayout(layout)
def setRegisterEntry(self, entry: Union[RegisterEntry, str]) -> None:
"""Set a new :py:class:`ClassRegisterEntry` to control.
Arguments
---------
entry: Union[RegisterEntry, str]
Either a subclass of the register class or the
(fully qualified) name of such a class.
"""
if isinstance(entry, str):
# An entry key is provided
entry = self._register[entry]
self._registerEntry = entry
# FIXME[todo]
# elif not isinstance(entry, ClassRegisterEntry) and entry is not None:
# raise TypeError("Argument class has invalid type: "
# f"{type(entry)}")
self.update()
def register_changed(self, register: Register,
change: Register.Change, key: str = None) -> None:
# pylint: disable=invalid-name
"""Called upon a change in the :py:class:`ClassRegister`.
Arguments
---------
register: Register
The :py:class:`Register` that was changed.
change: Register.Change
key: str
The key that was changed.
"""
LOG.info("%s.register_changed: %s [%r], key=%s",
type(self).__name__, type(register).__name__, change, key)
self.update()
@protect
def _onButtonClicked(self, checked: bool) -> None:
"""The button has been clicked.
"""
LOG.info("%s.buttonClicked(checked=%r): text=%s, key=%s",
type(self).__name__, checked, self._button.text(),
"None" if self._registerEntry is None else
self._registerEntry.key)
if self._registerEntry is None:
return # nothing to do (should not happen) ...
if not self._registerEntry.initialized:
# initialize the class object represented by the current entry
self._registerEntry.initialize()
class QClassRegisterEntryController(QRegisterClassEntryController):
"""Controller for a :py:class:`ClassRegisterEntry`. Such
    an entry represents an (uninitialized or initialized) class
object. The controller provides a button to initialize
the class object (import the module that defines the class).
If initialized, some additional information on the class
is presented.
"""
def __init__(self, registerClass: RegisterClass = None, **kwargs) -> None:
super().__init__(**kwargs)
if registerClass is not None:
self.setRegister(registerClass.class_register)
def _initUI(self) -> None:
"""Initialize the user interface for this
:py:class:`QClassRegisterEntryController`.
"""
super()._initUI()
self._button.setText("Initialize")
def update(self) -> None:
"""Update the display of this
:py:class:`QClassRegisterEntryController`.
This may adapt the state of controls (enabled/disabled)
and information displayed in labels and other elements,
according to the current :py:class:`ClassRegisterEntry`.
"""
entry = self._registerEntry
if entry is None:
self._descriptionLabel.setText("")
self._stateLabel.setText("")
self._keyLabel.setText("")
self._button.setEnabled(False)
else:
self._keyLabel.setText(f"[{entry.key}]")
if entry.initialized:
self._stateLabel.setText("initialized")
self._button.setEnabled(False)
self._descriptionLabel.setText(entry.cls.__doc__)
else:
self._stateLabel.setText("uninitialized")
self._button.setEnabled(True)
self._descriptionLabel.setText(entry.module_name + "." +
entry.class_name)
super().update()
class QInitializeButton(QPrepareButton, qobservables={
InstanceRegisterEntry: {'busy_changed', 'state_changed'}}):
"""An initialize button allows to initialize the class or
instance represented by a :py:class:`ClassRegisterEntry`
or :py:class:`InstanceRegisterEntry`, respectively.
"""
def __init__(self, initialize: str = "Initialize", **kwargs) -> None:
"""Initialize the :py:class:`QInitializeButton`.
"""
# _initialize: bool
# A flag indicating if this button is in initialize mode (True)
# or in prepare mode (False).
self._initialize = False
# _initializeText: str
# The label to be displayed on the button if it is in
# initialize mode.
self._initializeText = initialize
# _prepareText: str
# The label to be displayed on the button if it is in
        # prepare mode (that is, not in initialize mode).
self._prepareText = "Prepare"
super().__init__(**kwargs)
def setInitialize(self) -> None:
"""Set this :py:class:`QInitializeButton` to be in
initialization mode. In that mode, pressing the button
will trigger initialization.
"""
self._initialize = True
self.setPreparable(None)
self.updateState()
def updateState(self) -> None:
"""Update this :py:class:`QInitializeButton` based on the
state of the :py:class:`Preparable`.
"""
if self._preparable is None and self._initialize:
# we are in initialize mode
entry = self._instanceRegisterEntry
if entry is None:
self.setEnabled(False)
self.setChecked(False)
self.setText("no object")
else:
if entry.busy:
self.setEnabled(False)
self.setChecked(True)
if entry.initialized:
self.setText("Uninitializing")
else:
self.setText("Initializing")
else:
self.setEnabled(entry.initializable)
self.setChecked(entry.initialized)
self.setText("Initialize")
else:
self._initialize = False
self.setText(self._prepareText)
super().updateState()
def entry_changed(self, entry: InstanceRegisterEntry,
change: InstanceRegisterEntry.Change) -> None:
"""React to a change of the observed
:py:class:`InstanceRegisterEntry`. Such a change means that
the entry was initialized, that is that an object was created.
"""
if not entry.busy:
self.setPreparable(entry.obj)
else:
self.updateState()
class QInstanceRegisterEntryController(QRegisterClassEntryController,
qobservables={InstanceRegisterEntry: {'state_changed'}}):
"""A controller for an :py:class:`InstanceRegisterEntry`. This
    controller allows one to instantiate and initialize a registered
instance of a class.
"""
def __init__(self, registerClass: RegisterClass = None, **kwargs) -> None:
super().__init__(**kwargs)
if registerClass is not None:
self.setRegister(registerClass.instance_register)
def _initUI(self) -> None:
"""Initialize the user interface for this
:py:class:`QInstanceRegisterEntryController`.
"""
self._button = QInitializeButton()
self.addAttributePropagation(InstanceRegisterEntry, self._button)
super()._initUI()
def update(self) -> None:
"""Update the display of this
:py:class:`QInstanceRegisterEntryController`.
This may adapt the state of controls (enabled/disabled)
and information displayed in labels and other elements,
according to the current :py:class:`InstanceRegisterEntry`.
"""
entry = self._registerEntry
if entry is None:
self._descriptionLabel.setText("")
self._stateLabel.setText("")
self._keyLabel.setText("")
self._button.setPreparable(None)
else:
self._keyLabel.setText(f"[{entry.key}]")
if entry.initialized:
self._stateLabel.setText("initialized")
self._button.setPreparable(entry.obj)
self._descriptionLabel.setText(entry.cls.__doc__)
else:
self._stateLabel.setText("uninitialized")
self._button.setInitialize()
self._descriptionLabel.setText(str(entry))
super().update()
@protect
def _onButtonClicked(self, checked: bool) -> None:
"""The button has been clicked.
"""
LOG.info("%s.buttonClicked(checked=%r): text=%s, key=%s",
type(self).__name__, checked, self._button.text(),
"None" if self._registerEntry is None else
self._registerEntry.key)
if self._registerEntry is None:
return # nothing to do (should not happen) ...
if not self._registerEntry.initialized:
# initialize the class object represented by the current entry
self._button.setEnabled(False)
self._button.setText("Initializing")
print("QInstanceRegisterEntryController: Initializing ...")
self._registerEntry.initialize()
def setRegisterEntry(self, entry: Union[RegisterEntry, str]) -> None:
"""Set a new :py:class:`ClassRegisterEntry` to control.
Arguments
---------
entry: Union[RegisterEntry, str]
Either a subclass of the register class or the
(fully qualified) name of such a class.
"""
super().setRegisterEntry(entry)
# FIXME[hack]: set the observable InstanceRegisterEntry ...
self.setInstanceRegisterEntry(self._registerEntry)
#
# Observers
#
def entry_changed(self, entry: InstanceRegisterEntry,
change: InstanceRegisterEntry.Change) -> None:
self.update()
class QInstanceRegisterComboBox(QRegisterComboBox):
"""A :py:class:`QComboBox` for selecting entries from a
:py:class:`Register`.
"""
def _formatItemAt(self, index: int) -> None:
"""Format the item at the given index to reflect
the state of the underlying item.
This method may be extended by subclasses.
"""
super()._formatItemAt(index)
# disable item if tool is not instantiable
instance = self._getItemAt(index) # instance or InstanceRegisterEntry
if (isinstance(instance, InstanceRegisterEntry) and
not instance.initializable):
item = self.model().item(index) # QtGui.QStandardItem
item.setFlags(item.flags() & ~ Qt.ItemIsEnabled)
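# --- Illustrative sketch (editor addition, not part of the original module):
# combining the widgets above into a small browser: a QRegisterClassView
# listing the instance register next to a QInstanceRegisterEntryController
# showing and initializing the selected entry. The registerClass argument is
# assumed to be a RegisterClass from dltb.base.register; the layout is
# illustrative only.
def _exampleInstanceBrowser(registerClass: RegisterClass) -> QWidget:
    widget = QWidget()
    layout = QHBoxLayout()
    view = QRegisterClassView(registerClass=registerClass)
    view.setMode('instance')                 # list the instance_register
    controller = QInstanceRegisterEntryController(registerClass=registerClass)
    view.instanceSelected.connect(controller.setRegisterEntry)
    layout.addWidget(view)
    layout.addWidget(controller)
    widget.setLayout(layout)
    return widget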
|
|
# -*- coding: utf-8 -*-
"""Amazon AWS Connection."""
from __future__ import absolute_import, unicode_literals
from vine import promise, transform
from kombu.asynchronous.aws.ext import AWSRequest, get_response
from kombu.asynchronous.http import Headers, Request, get_client
from kombu.five import items, python_2_unicode_compatible
import io
try: # pragma: no cover
from email import message_from_bytes
from email.mime.message import MIMEMessage
# py3
def message_from_headers(hdr): # noqa
bs = "\r\n".join("{}: {}".format(*h) for h in hdr)
return message_from_bytes(bs.encode())
except ImportError: # pragma: no cover
from mimetools import Message as MIMEMessage # noqa
# py2
def message_from_headers(hdr): # noqa
return io.BytesIO(b'\r\n'.join(
b'{0}: {1}'.format(*h) for h in hdr
))
__all__ = (
'AsyncHTTPSConnection', 'AsyncConnection',
)
@python_2_unicode_compatible
class AsyncHTTPResponse(object):
"""Async HTTP Response."""
def __init__(self, response):
self.response = response
self._msg = None
self.version = 10
def read(self, *args, **kwargs):
return self.response.body
def getheader(self, name, default=None):
return self.response.headers.get(name, default)
def getheaders(self):
return list(items(self.response.headers))
@property
def msg(self):
if self._msg is None:
self._msg = MIMEMessage(message_from_headers(self.getheaders()))
return self._msg
@property
def status(self):
return self.response.code
@property
def reason(self):
if self.response.error:
return self.response.error.message
return ''
def __repr__(self):
return repr(self.response)
@python_2_unicode_compatible
class AsyncHTTPSConnection(object):
"""Async HTTP Connection."""
Request = Request
Response = AsyncHTTPResponse
method = 'GET'
path = '/'
body = None
default_ports = {'http': 80, 'https': 443}
def __init__(self, strict=None, timeout=20.0, http_client=None):
self.headers = []
self.timeout = timeout
self.strict = strict
self.http_client = http_client or get_client()
def request(self, method, path, body=None, headers=None):
self.path = path
self.method = method
if body is not None:
try:
read = body.read
except AttributeError:
self.body = body
else:
self.body = read()
if headers is not None:
self.headers.extend(list(items(headers)))
def getrequest(self):
headers = Headers(self.headers)
return self.Request(self.path, method=self.method, headers=headers,
body=self.body, connect_timeout=self.timeout,
request_timeout=self.timeout, validate_cert=False)
def getresponse(self, callback=None):
request = self.getrequest()
request.then(transform(self.Response, callback))
return self.http_client.add_request(request)
def set_debuglevel(self, level):
pass
def connect(self):
pass
def close(self):
pass
def putrequest(self, method, path):
self.method = method
self.path = path
def putheader(self, header, value):
self.headers.append((header, value))
def endheaders(self):
pass
def send(self, data):
if self.body:
self.body += data
else:
self.body = data
def __repr__(self):
return '<AsyncHTTPConnection: {0!r}>'.format(self.getrequest())
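# --- Illustrative sketch (editor addition, not part of kombu): issuing a GET
# request through AsyncHTTPSConnection and reading the status in a callback.
# The URL is a placeholder and a running kombu event loop (hub) is assumed to
# drive the underlying asynchronous HTTP client.
def _example_async_get(url='https://example.com/'):
    conn = AsyncHTTPSConnection()
    conn.request('GET', url)
    def on_ready(response):
        # response is an AsyncHTTPResponse wrapping the raw client response
        print('{0} {1}'.format(response.status, response.reason))
    conn.getresponse(callback=on_ready)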
class AsyncConnection(object):
"""Async AWS Connection."""
def __init__(self, sqs_connection, http_client=None, **kwargs): # noqa
self.sqs_connection = sqs_connection
self._httpclient = http_client or get_client()
def get_http_connection(self):
return AsyncHTTPSConnection(http_client=self._httpclient)
def _mexe(self, request, sender=None, callback=None):
callback = callback or promise()
conn = self.get_http_connection()
if callable(sender):
sender(conn, request.method, request.path, request.body,
request.headers, callback)
else:
conn.request(request.method, request.url,
request.body, request.headers)
conn.getresponse(callback=callback)
return callback
class AsyncAWSQueryConnection(AsyncConnection):
"""Async AWS Query Connection."""
def __init__(self, sqs_connection, http_client=None,
http_client_params=None, **kwargs):
if not http_client_params:
http_client_params = {}
AsyncConnection.__init__(self, sqs_connection, http_client,
**http_client_params)
def make_request(self, operation, params_, path, verb, callback=None): # noqa
params = params_.copy()
if operation:
params['Action'] = operation
signer = self.sqs_connection._request_signer # noqa
# defaults for non-get
signing_type = 'standard'
param_payload = {'data': params}
if verb.lower() == 'get':
# query-based opts
signing_type = 'presignurl'
param_payload = {'params': params}
request = AWSRequest(method=verb, url=path, **param_payload)
signer.sign(operation, request, signing_type=signing_type)
prepared_request = request.prepare()
return self._mexe(prepared_request, callback=callback)
def get_list(self, operation, params, markers, path='/', parent=None, verb='POST', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_list_ready, callback, parent or self, markers,
operation
),
)
def get_object(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_obj_ready, callback, parent or self, operation
),
)
def get_status(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_status_ready, callback, parent or self, operation
),
)
def _on_list_ready(self, parent, markers, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == 200:
_, parsed = get_response(
service_model.operation_model(operation), response.response
)
return parsed
else:
raise self._for_status(response, response.read())
def _on_obj_ready(self, parent, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == 200:
_, parsed = get_response(
service_model.operation_model(operation), response.response
)
return parsed
else:
raise self._for_status(response, response.read())
def _on_status_ready(self, parent, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == 200:
httpres, _ = get_response(
service_model.operation_model(operation), response.response
)
return httpres.code
else:
raise self._for_status(response, response.read())
def _for_status(self, response, body):
context = 'Empty body' if not body else 'HTTP Error'
return Exception("Request {} HTTP {} {} ({})".format(
context, response.status, response.reason, body
))
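# --- Illustrative sketch (editor addition, not part of kombu): driving the
# async query connection. 'sqs_client' stands for a botocore SQS client; as
# used above it must expose meta.service_model and _request_signer. The
# callback receives the parsed response once the request completes on the
# event loop.
def _example_list_queues(sqs_client):
    conn = AsyncAWSQueryConnection(sqs_client)
    def on_ready(parsed):
        print(parsed)
    return conn.get_object('ListQueues', {}, path='/', callback=on_ready)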