gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Django settings for kpcc_backroom_handshakes project.
# -*- coding: utf-8 -*-
import os
from os.path import expanduser
from kpcc_backroom_handshakes.settings_common import *
import pytz
from pytz import timezone

# django-debug-toolbar follows the master DEBUG switch from settings_common.
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR = DEBUG
INTERNAL_IPS = CONFIG.get("internal_ips", None)

# Database credentials come from the deploy-specific CONFIG mapping.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": CONFIG["database"]["database"],
        "USER": CONFIG["database"]["username"],
        "PASSWORD": CONFIG["database"]["password"],
        "HOST": CONFIG["database"]["host"],
        "PORT": CONFIG["database"]["port"],
    }
}

SECRET_KEY = CONFIG["secret_key"]

# Optional third-party API credentials: each block only applies when the
# matching section exists in CONFIG["api"].
if "twitter" in CONFIG["api"]:
    TWITTER_CONSUMER_KEY = CONFIG["api"]["twitter"]["consumer_key"]
    TWITTER_CONSUMER_SECRET = CONFIG["api"]["twitter"]["consumer_secret"]
    TWITTER_ACCESS_TOKEN = CONFIG["api"]["twitter"]["access_token"]
    TWITTER_ACCESS_TOKEN_SECRET = CONFIG["api"]["twitter"]["access_token_secret"]
    LOCAL_TWITTER_TIMEZONE = pytz.timezone("US/Pacific")
    TWITTER_TIMEZONE = timezone("UTC")

if "slack" in CONFIG["api"]:
    SLACK_TOKEN = CONFIG["api"]["slack"]["token"]
    SLACK_API_KEY = CONFIG["api"]["slack"]["api_key"]

if "maplight" in CONFIG["api"]:
    MAP_LIGHT_API_KEY = CONFIG["api"]["maplight"]["api_key"]

if "propublica" in CONFIG["api"]:
    PRO_PUBLICA_API_KEY = CONFIG["api"]["propublica"]["api_key"]

# Headers attached to outbound scraping/API requests.
REQUEST_HEADERS = {
    "From": CONFIG["api"]["headers"]["from"],
    "User-agent": CONFIG["api"]["headers"]["user_agent"],
}

# auth to send out emails when models change
if "email" in CONFIG:
    EMAIL_HOST = CONFIG["email"]["host"]
    EMAIL_HOST_USER = CONFIG["email"]["user"]
    EMAIL_HOST_PASSWORD = CONFIG["email"]["password"]
    EMAIL_PORT = CONFIG["email"]["port"]
    EMAIL_USE_TLS = CONFIG["email"]["use_tls"]

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Deploy-specific apps are appended to the base INSTALLED_APPS.
if CONFIG["installed_apps"]:
    INSTALLED_APPS += tuple(CONFIG["installed_apps"])

# Dummy cache while developing; database-backed cache in production.
if DEBUG:
    CACHES = {
        "default": {
            "BACKEND": "django.core.cache.backends.dummy.DummyCache",
        }
    }
else:
    CACHES = {
        "default": {
            "BACKEND": "django.core.cache.backends.db.DatabaseCache",
            "LOCATION": "handshakes_cache",
            "TIMEOUT": 600,
            "OPTIONS": {
                "MAX_ENTRIES": 500
            }
        }
    }

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "kpcc_backroom_handshakes.wsgi.application"

ADMIN_MEDIA_PREFIX = "/media/"

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ""

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ""

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(SITE_ROOT, "static")

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
SITE_URL = CONFIG["site_url"]

# Additional locations of static files
STATICFILES_DIRS = (
)

# django-bakery build/deploy settings; only applied when a "build" section
# is present in the deploy config.
if "build" in CONFIG:
    if "staging" in CONFIG["build"]:
        STAGING = CONFIG["build"]["staging"]
    if "staging_prefix" in CONFIG["build"]:
        STAGING_PREFIX = CONFIG["build"]["staging_prefix"]
    if "live_prefix" in CONFIG["build"]:
        LIVE_PREFIX = CONFIG["build"]["live_prefix"]
    if "deploy_dir" in CONFIG["build"]:
        DEPLOY_DIR = CONFIG["build"]["deploy_dir"]
    STATIC_DIR = STATIC_URL
    BUILD_DIR = os.path.join(STATIC_ROOT, CONFIG["build"]["build_dir"])
    BAKERY_VIEWS = tuple(CONFIG["build"]["views"])
    URL_PATH = ""
    AWS_BUCKET_NAME = CONFIG["build"]["aws_bucket_name"]
    AWS_ACCESS_KEY_ID = CONFIG["build"]["aws_access_key_id"]
    AWS_SECRET_ACCESS_KEY = CONFIG["build"]["aws_secret_access_key"]
    AWS_S3_HOST = CONFIG["build"]["aws_s3_host"]
    BAKERY_CACHE_CONTROL = {
        'text/html': CONFIG["build"]["bakery_cache_control"]["html"],
        'application/javascript': CONFIG["build"]["bakery_cache_control"]["javascript"]
    }
    STATIC_TO_IGNORE = tuple(CONFIG["build"]["static_to_ignore"])

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# NOTE(review): the slack-* handlers below read SLACK_API_KEY unconditionally,
# so importing settings raises NameError when CONFIG["api"] has no "slack"
# section — confirm every deploy config defines it.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "filters": {
        "require_debug_false": {
            "()": "django.utils.log.RequireDebugFalse"
        }
    },
    "formatters": {
        # ANSI color escapes make levels stand out in a terminal.
        "verbose": {
            "format": "\033[1;36m%(levelname)s: %(filename)s (def %(funcName)s %(lineno)s): \033[1;37m %(message)s",
            "datefmt": "%d/%b/%Y %H:%M:%S"
        },
        "simple": {
            "format": "\033[1;36m%(levelname)s: %(filename)s (def %(funcName)s %(lineno)s): \033[1;37m %(message)s"
        },
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "simple"
        },
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler"
        },
        "slack-error": {
            "level": "ERROR",
            "api_key": SLACK_API_KEY,
            "username": "backroom-handshakes",
            "icon_url": "https://pbs.twimg.com/media/CSWMwztWoAAYoxC.jpg",
            "class": "slacker_log_handler.SlackerLogHandler",
            "channel": "#2018-election-results"
        },
        "slack-debug": {
            "level": "DEBUG",
            "username": "backroom-handshakes",
            "icon_url": "https://pbs.twimg.com/media/CSWMwztWoAAYoxC.jpg",
            "api_key": SLACK_API_KEY,
            "class": "slacker_log_handler.SlackerLogHandler",
            "channel": "#2018-election-results"
        },
        "slack-info": {
            "level": "INFO",
            "username": "backroom-handshakes",
            "icon_url": "https://pbs.twimg.com/media/CSWMwztWoAAYoxC.jpg",
            "api_key": SLACK_API_KEY,
            "class": "slacker_log_handler.SlackerLogHandler",
            "channel": "#2018-election-results"
        },
        "file": {
            "level": "DEBUG",
            "class": "logging.FileHandler",
            "filename": "mysite.log",
            "formatter": "verbose"
        },
    },
    "loggers": {
        "kpcc_backroom_handshakes": {
            "handlers": [
                "console",
                "mail_admins",
                "slack-info",
            ],
            "level": "DEBUG",
            "propagate": False,
        },
    }
}
| |
import asyncio
import json
import logging
from typing import Union
from dht.node import Node
class Message:
    """A JSON envelope exchanged between DHT peers.

    A message carries an ``id``, a ``data`` payload and, for requests, a
    ``command`` name. Requests additionally get a Future that is resolved
    when the matching response arrives.
    """

    # Monotonically increasing sequence number shared by all outgoing requests.
    MESSAGE_ID = 0

    def __init__(self, msg_id: int, data: Union[str, dict], command: str = None):
        self.id = msg_id
        self.data = data
        self.command = command
        if command:
            # Only requests (messages with a command) wait on a response.
            self.future = asyncio.Future()

    def get_bytes(self) -> bytes:
        """Serialize to JSON bytes; ``command`` is included only when set."""
        payload = {
            "id": self.id,
            "data": self.data,
        }
        if self.command:
            payload["command"] = self.command
        return json.dumps(payload).encode()

    @staticmethod
    def create(command: str, data: Union[str, dict]) -> 'Message':
        """Build a new outgoing request with a fresh sequence id."""
        msg = Message(Message.MESSAGE_ID, data, command)
        Message.MESSAGE_ID += 1
        return msg

    @staticmethod
    def from_bytes(data: bytes) -> 'Message':
        """Decode JSON bytes into a Message; a missing command becomes None."""
        decoded = json.loads(data.decode())
        return Message(decoded['id'], decoded['data'], decoded.get('command'))

    @staticmethod
    def create_response(message: 'Message', data: Union[str, dict]) -> 'Message':
        """Build a reply carrying *data* under the same id as *message*."""
        return Message(message.id, data)
class DHTProtocol(asyncio.Protocol):
    """asyncio protocol speaking the JSON format defined by Message.

    After an identify exchange the remote peer is tracked as a Node in the
    routing table. Outstanding requests live in ``self.messages`` keyed by
    message id so an incoming response can resolve the matching Future.
    """

    def __init__(self, self_key, bucket_tree, value_store, listen_port):
        self.self_key = self_key        # our own DHT key, sent in identify()
        self.routing = bucket_tree      # routing table of known nodes
        self.value_store = value_store  # local key/value storage backend
        self.listen_port = listen_port  # advertised so peers can call back
        self.transport = None           # set in connection_made of subclasses
        self.node = None                # Node for the remote peer, set on identify
        self.messages = {}              # message id -> outstanding request Message

    def send_message(self, message):
        """ Send a message to the other end, only send the id, command and
        data keys of the message. """
        # Remember the request so the eventual response can be matched by id.
        self.messages[message.id] = message
        data = message.get_bytes()
        logging.debug("Sending: {:s}".format(data.decode()))
        self.transport.write(data)

    def data_received(self, data):
        """Receive data from the other end, determine if it is a command or a
        response and act accordingly. """
        # NOTE(review): assumes every data_received() call carries exactly one
        # complete JSON message; TCP may split or merge frames — confirm.
        message = Message.from_bytes(data)
        logging.debug(data)
        if message.command:
            self.command_received(message)
        else:
            self.response_received(message)

    def command_received(self, message):
        """ Receive a command, call the right handle and write the response. """
        logging.info("Message received with command: {}".format(message.command))
        # Dispatch table from command name to its handler.
        commands = {
            "identify": self.handle_identify,
            "find_node": self.handle_find_node,
            "find_value": self.handle_find_value,
            "store": self.handle_store,
        }
        # Get the appropriate command.
        command = commands[message.command]
        # Call the command to get the response.
        response = command(message.data)
        logging.info("Sending response on command: {}".format(message.command))
        # Create a response message with the data from the command.
        message = Message.create_response(message, response)
        data = message.get_bytes()
        logging.debug("Sending response: {:s}".format(data.decode()))
        self.transport.write(data)

    def response_received(self, message):
        """ Receive a response, set the result of the Future. """
        orig_message = self.messages[message.id]
        orig_message.future.set_result(message.data)
        logging.info("Response received on command: {}".format(orig_message.command))
        # Some commands need extra bookkeeping when their response arrives.
        response_handlers = {
            "identify": self.handle_identify_response,
            "find_node": self.handle_find_response,
            "find_value": self.handle_find_response,
        }
        if orig_message.command in response_handlers:
            response_handlers[orig_message.command](message.data)
        # Request complete; drop it from the outstanding set.
        del self.messages[message.id]

    def identify(self):
        """Announce our key and listen port; ask for theirs if still unknown."""
        message = Message.create('identify', {
            "key": self.self_key,
            "request_key": self.node is None,
            "listen_port": self.listen_port,
        })
        self.send_message(message)

    def find_node(self, key):
        """Ask the peer for nodes close to *key*; returns the request Future."""
        message = Message.create('find_node', key)
        self.send_message(message)
        return message.future

    def find_value(self, key):
        """Ask the peer for the value under *key*; returns the request Future."""
        message = Message.create('find_value', key)
        self.send_message(message)
        return message.future

    def store(self, value):
        """Ask the peer to store *value*; returns the request Future."""
        message = Message.create('store', value)
        self.send_message(message)
        return message.future

    def handle_identify(self, data):
        """Register the remote peer as a Node; return our key when requested."""
        socket = self.transport.get_extra_info('peername')
        # Use the peer's source address but its advertised listen port.
        self.node = Node(data["key"], socket[0], data['listen_port'], self)
        self.routing.add_node(self.node)
        if data["request_key"]:
            return {
                "key": self.self_key,
                "request_key": False,
            }
        else:
            # Peer already knows our key; the response payload is just False.
            return False

    def handle_find_node(self, key):
        """ Give back the closest nodes to the given key. """
        return [node.get_data() for node in self.routing.find_nodes(key)]

    def handle_find_value(self, key):
        """Return the stored value, or the closest nodes when not stored here."""
        try:
            value = self.value_store.retrieve(key)
        except KeyError:
            value = [node.get_data() for node in self.routing.find_nodes(key)]
        return value

    def handle_store(self, data):
        """Store *data* locally; the response payload is None."""
        self.value_store.store(data)

    def handle_identify_response(self, data: dict) -> None:
        """ Handle the response on our identify() request, add the Node. """
        socket = self.transport.get_extra_info('peername')
        # NOTE(review): uses the connection's source port here, unlike
        # handle_identify() which uses the advertised listen_port — confirm.
        self.node = Node(data["key"], socket[0], socket[1], self)
        self.routing.add_node(self.node)

    def handle_find_response(self, data):
        """
        Handle the response on our find_value or find_node request.
        :param data: mixed
        """
        # When the data is from find_value it can be just the value.
        if type(data) != list:
            return
        for node in data:
            # NOTE(review): assumes each entry is (key, host, port) as
            # produced by Node.get_data() — confirm against dht.node.
            node = Node(node[0], node[1], node[2])
            self.routing.add_node(node)
class DHTServerProtocol(DHTProtocol):
    """Server side of the DHT protocol: stores the transport and waits for
    the connecting peer to identify itself."""

    def connection_made(self, transport):
        # Keep the transport so handlers can write responses back.
        logging.info("Connection made with {}".format(transport))
        self.transport = transport
class DHTClientProtocol(DHTProtocol):
    """Client side of the DHT protocol: identifies itself to the remote
    peer as soon as the connection is established."""

    def connection_made(self, transport):
        logging.info("Connection made with {}".format(transport))
        self.transport = transport
        # The connecting side initiates the identify handshake.
        self.identify()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._policy_definitions_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PolicyDefinitionsOperations:
    """PolicyDefinitionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.policy.v2015_10_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this class is AutoRest-generated; hand edits are lost on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def create_or_update(
        self,
        policy_definition_name: str,
        parameters: "_models.PolicyDefinition",
        **kwargs: Any
    ) -> "_models.PolicyDefinition":
        """Creates or updates a policy definition.

        :param policy_definition_name: The name of the policy definition to create.
        :type policy_definition_name: str
        :param parameters: The policy definition properties.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyDefinition"]
        # Map well-known status codes to typed exceptions; callers may extend
        # the mapping through the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'PolicyDefinition')

        request = build_create_or_update_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 201 Created is the only success code this operation accepts.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyDefinition', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        policy_definition_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes a policy definition.

        :param policy_definition_name: The name of the policy definition to delete.
        :type policy_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Both 200 OK and 204 No Content count as a successful delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        policy_definition_name: str,
        **kwargs: Any
    ) -> "_models.PolicyDefinition":
        """Gets the policy definition.

        :param policy_definition_name: The name of the policy definition to get.
        :type policy_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyDefinition"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyDefinition', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}'}  # type: ignore

    @distributed_trace
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.PolicyDefinitionListResult"]:
        """Gets all the policy definitions for a subscription.

        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyDefinitionListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinitionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyDefinitionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated list URL; subsequent pages follow
            # the service-provided next_link unchanged.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Pull the page's items and the link to the next page out of the
            # deserialized list result.
            deserialized = self._deserialize("PolicyDefinitionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions'}  # type: ignore
| |
import struct
from datetime import datetime
import numpy as np
from pySDC.helpers.pysdc_helper import FrozenClass
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
class _fault_stats(FrozenClass):
    """Frozen container of counters tracking fault injection/detection events."""

    def __init__(self):
        # All counters start at zero; _freeze() then blocks any attribute
        # additions, so typos fail loudly instead of creating new fields.
        for counter in (
            'nfaults_called',
            'nfaults_injected_u',
            'nfaults_injected_f',
            'nfaults_detected',
            'ncorrection_attempts',
            'nfaults_missed',
            'nfalse_positives',
            'nfalse_positives_in_correction',
            'nclean_steps',
        ):
            setattr(self, counter, 0)
        self._freeze()
class implicit_sweeper_faults(generic_implicit):
    """
    LU sweeper using LU decomposition of the Q matrix for the base integrator, special type of generic implicit
    sweeper, extended with single-bit fault injection, detection and correction during the node sweep.
    """

    def __init__(self, params):
        """
        Initialization routine for the custom sweeper

        Args:
            params: parameters for the sweeper; the fault-injection options
                ``allow_fault_correction``, ``detector_threshold`` and
                ``dump_injections_filehandle`` get defaults when omitted.
        """
        if 'allow_fault_correction' not in params:
            params['allow_fault_correction'] = False

        if 'detector_threshold' not in params:
            params['detector_threshold'] = 1.0

        if 'dump_injections_filehandle' not in params:
            params['dump_injections_filehandle'] = None

        # call parent's initialization routine
        super(implicit_sweeper_faults, self).__init__(params)

        self.fault_stats = _fault_stats()

        # Flags steering the inject -> detect -> correct cycle of a sweep.
        self.fault_injected = False
        self.fault_detected = False
        self.in_correction = False
        self.fault_iteration = False

    def reset_fault_stats(self):
        """
        Helper method to reset all fault related stats and flags. Will be called after the run in post-processing.
        """
        self.fault_stats = _fault_stats()
        self.fault_injected = False
        self.fault_detected = False
        self.in_correction = False
        self.fault_iteration = False

    @staticmethod
    def bitsToFloat(b):
        """
        Static helper method to get a number from bit into float representation

        Args:
            b: bit representation of a number

        Returns:
            float representation of b
        """
        # Pack as signed 64-bit int, reinterpret the same bytes as a double.
        s = struct.pack('>q', b)
        return struct.unpack('>d', s)[0]

    @staticmethod
    def floatToBits(f):
        """
        Static helper method to get a number from float into bit representation

        Args:
            f: float representation of a number

        Returns:
            bit representation of f
        """
        # Pack as double, reinterpret the same bytes as a signed 64-bit int.
        s = struct.pack('>d', f)
        return struct.unpack('>q', s)[0]

    def do_bitflip(self, a, pos):
        """
        Method to do a bit flip

        Args:
            a: float representation of a number
            pos (int between 0 and 63): position of bit flip

        Returns:
            float representation of a number after bit flip at pos
        """
        # flip of mantissa (fraction) bit (pos between 0 and 51) or of exponent bit (pos between 52 and 62)
        if pos < 63:
            b = self.floatToBits(a)
            # mask: bit representation with 1 at pos and 0 elsewhere
            mask = 1 << pos
            # ^: bitwise xor-operator --> bit flip at pos
            c = b ^ mask
            return self.bitsToFloat(c)
        # "flip" of sign bit (pos = 63)
        elif pos == 63:
            return -a
        # NOTE(review): pos > 63 falls through and returns None; callers only
        # pass np.random.randint(64), so this path is unreachable in practice.

    def inject_fault(self, type=None, target=None):
        """
        Main method to inject a fault

        Args:
            type (str): string describing whether u of f should be affected
            target: data to be modified
        """
        pos = 0
        bitflip_entry = 0

        # do bitflip in u
        if type == 'u':
            # do something to target = u here!
            # do a bitflip at random vector entry of u at random position in bit representation
            ulen = len(target)
            bitflip_entry = np.random.randint(ulen)
            pos = np.random.randint(64)
            tmp = target[bitflip_entry]
            target[bitflip_entry] = self.do_bitflip(target[bitflip_entry], pos)
            # print(' fault in u injected')
            self.fault_stats.nfaults_injected_u += 1

        # do bitflip in f
        elif type == 'f':
            # do something to target = f here!
            # do a bitflip at random vector entry of f at random position in bit representation
            flen = len(target)
            bitflip_entry = np.random.randint(flen)
            pos = np.random.randint(64)
            tmp = target[bitflip_entry]
            target[bitflip_entry] = self.do_bitflip(target[bitflip_entry], pos)
            # print(' fault in f injected')
            self.fault_stats.nfaults_injected_f += 1

        else:
            tmp = None
            # NOTE(review): `type` shadows the builtin, and exit() raises
            # SystemExit from library code — consider raising ValueError.
            print('ERROR: wrong fault type specified, got %s' % type)
            exit()

        self.fault_injected = True

        # Optionally log the injection: timestamp, location and old/new value.
        if self.params.dump_injections_filehandle is not None:
            out = str(datetime.now())
            out += ' --- '
            out += type + ' ' + str(bitflip_entry) + ' ' + str(pos)
            out += ' --- '
            out += str(tmp) + ' ' + str(target[bitflip_entry]) + ' ' + \
                str(np.abs(tmp - target[bitflip_entry]))
            out += '\n'
            self.params.dump_injections_filehandle.write(out)

    def detect_fault(self, current_node=None, rhs=None):
        """
        Main method to detect a fault

        Args:
            current_node (int): current node we are working with at the moment
            rhs: right-hand side vector for usage in detector
        """
        # get current level for further use
        L = self.level

        # calculate solver residual: u - dt*qd*f(u) should equal the rhs the
        # implicit solve was fed with; a large mismatch flags a fault.
        res = L.u[current_node] - L.dt * self.QI[current_node, current_node] * L.f[current_node] - rhs
        res_norm = np.linalg.norm(res, np.inf)
        if np.isnan(res_norm) or res_norm > self.params.detector_threshold:
            # print('     FAULT DETECTED!')
            self.fault_detected = True
        else:
            self.fault_detected = False

        # update statistics
        # fault injected and fault detected -> yeah!
        if self.fault_injected and self.fault_detected:
            self.fault_stats.nfaults_detected += 1
        # no fault injected but fault detected -> meh!
        elif not self.fault_injected and self.fault_detected:
            self.fault_stats.nfalse_positives += 1
            # in correction mode and fault detected -> meeeh!
            if self.in_correction:
                self.fault_stats.nfalse_positives_in_correction += 1
        # fault injected but no fault detected -> meh!
        elif self.fault_injected and not self.fault_detected:
            self.fault_stats.nfaults_missed += 1
        # no fault injected and no fault detected -> yeah!
        else:
            self.fault_stats.nclean_steps += 1

    def correct_fault(self):
        """
        Main method to correct a fault or issue a restart
        """
        # do correction magic or issue restart here... could be empty!

        # we need to make sure that not another fault is injected here.. could also temporarily lower the probability
        self.in_correction = True
        # print('     doing correction...')

        self.fault_stats.ncorrection_attempts += 1
        self.fault_detected = False

    def update_nodes(self):
        """
        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes,
        with an optional single-bit fault injected at one random node (when ``fault_iteration`` is set),
        followed by detection and, if allowed, a correction re-sweep of that node.

        Returns:
            None
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # only if the level has been touched before
        assert L.status.unlocked

        # get number of collocation nodes for easier access
        M = self.coll.num_nodes

        # gather all terms which are known already (e.g. from the previous iteration)
        # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau

        # get QF(u^k)
        integral = self.integrate()
        for m in range(M):
            # get -QdF(u^k)_m
            for j in range(M + 1):
                integral[m] -= L.dt * self.QI[m + 1, j] * L.f[j]
            # add initial value
            integral[m] += L.u[0]
            # add tau if associated
            if L.tau[m] is not None:
                integral[m] += L.tau[m]

        # Pick one node per sweep at which a fault may be injected.
        fault_node = np.random.randint(M)

        # do the sweep
        m = 0
        while m < M:

            # see if there will be a fault
            self.fault_injected = False
            fault_at_u = False
            fault_at_f = False
            if not self.in_correction and m == fault_node and self.fault_iteration:
                # Coin flip: corrupt either the solution u or the rhs values f.
                if np.random.randint(2) == 0:
                    fault_at_u = True
                else:
                    fault_at_f = True

            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
            # this is what needs to be protected separately!
            rhs = P.dtype_u(integral[m])
            for j in range(m + 1):
                rhs += L.dt * self.QI[m + 1, j] * L.f[j]

            if fault_at_u:
                # implicit solve with prefactor stemming from the diagonal of Qd
                L.u[m + 1] = P.solve_system(rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1],
                                            L.time + L.dt * self.coll.nodes[m])

                # inject fault at some u value
                self.inject_fault(type='u', target=L.u[m + 1])

                # update function values
                L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

            elif fault_at_f:
                # implicit solve with prefactor stemming from the diagonal of Qd
                L.u[m + 1] = P.solve_system(rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1],
                                            L.time + L.dt * self.coll.nodes[m])

                # update function values
                L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

                # inject fault at some f value
                self.inject_fault(type='f', target=L.f[m + 1])

            else:
                # implicit solve with prefactor stemming from the diagonal of Qd
                L.u[m + 1] = P.solve_system(rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1],
                                            L.time + L.dt * self.coll.nodes[m])

                # update function values
                L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])

            # see if our detector finds something
            self.detect_fault(current_node=m + 1, rhs=rhs)

            # if we are allowed to try correction, do so, otherwise proceed with sweep
            if not self.in_correction and self.fault_detected and self.params.allow_fault_correction:
                # Re-run this node: m is NOT advanced; correct_fault() raises
                # the in_correction flag so no new fault is injected.
                self.correct_fault()
            else:
                self.in_correction = False
                m += 1

        # indicate presence of new values at this level
        L.status.updated = True

        return None
| |
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
# Public names re-exported by ``from ._validation import *``.
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
           'learning_curve', 'validation_curve']

# Every supported cross-validation splitter class, keyed by its class name.
ALL_CVS = {'KFold': KFold,
           'LabelKFold': LabelKFold,
           'LeaveOneLabelOut': LeaveOneLabelOut,
           'LeaveOneOut': LeaveOneOut,
           'LeavePLabelOut': LeavePLabelOut,
           'LeavePOut': LeavePOut,
           'ShuffleSplit': ShuffleSplit,
           'LabelShuffleSplit': LabelShuffleSplit,
           'StratifiedKFold': StratifiedKFold,
           'StratifiedShuffleSplit': StratifiedShuffleSplit,
           'PredefinedSplit': PredefinedSplit}

# The subset of splitters that require a ``labels`` array when splitting.
LABEL_CVS = {'LabelKFold': LabelKFold,
             'LeaveOneLabelOut': LeaveOneLabelOut,
             'LeavePLabelOut': LeavePLabelOut,
             'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
                    n_jobs=1, verbose=0, fit_params=None,
                    pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer
        callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy. Possible inputs:
        None for the default 3-fold cross validation, an integer for the
        number of folds in a `(Stratified)KFold`, a CV splitter object, or
        an iterable yielding train/test splits. For integer/None inputs,
        :class:`StratifiedKFold` is used when the estimator is a classifier
        and ``y`` is binary or multiclass; :class:`KFold` otherwise.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls how many jobs get dispatched during parallel execution:
        None (dispatch everything immediately), an int (exact number of
        dispatched jobs), or an expression in n_jobs such as '2*n_jobs'.
    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y))  # doctest: +ELLIPSIS
    [ 0.33150734  0.08022311  0.03531764]
    See Also
    ---------
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    X, y, labels = indexable(X, y, labels)
    checked_cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    # Each fold fits its own clone of the estimator, so the folds stay
    # independent of each other and remain picklable for joblib.
    fold_results = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test,
                                verbose, None, fit_params)
        for train, test in checked_cv.split(X, y, labels))
    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # only the score is of interest here.
    return np.array(fold_results)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    parameters : dict or None
        Parameters to be set on the estimator.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    return_train_score : boolean, optional, default: False
        Compute and return score on training set.
    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.
    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.
    test_score : float
        Score on test set.
    n_test_samples : int
        Number of test samples.
    scoring_time : float
        Time spent for fitting and scoring in seconds.
    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    # Build the "[CV] param=value ..." progress line; msg is reused (and
    # extended) by the verbose > 2 branch at the end of this function.
    if verbose > 1:
        if parameters is None:
            msg = ''
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
    # Sample-aligned fit parameters (e.g. sample_weight) are subset to the
    # training indices; scalars and non-aligned values pass through as-is.
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    # test indices are excluded from the train split; passing `train` lets
    # _safe_split handle precomputed-kernel estimators correctly.
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    try:
        # y may legitimately be None for unsupervised estimators.
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # error_score selects the policy for failed fits: re-raise, or
        # substitute a numeric placeholder score and warn.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)")
    else:
        # Fit succeeded: score on the test split (and train split on demand).
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)
    # Reported time covers both fitting and scoring.
    scoring_time = time.time() - start_time
    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
    # Assemble the return list in the documented order.
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
                      method='predict'):
    """Generate cross-validated estimates for each input data point.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy. Possible inputs:
        None for the default 3-fold cross validation, an integer for the
        number of folds in a `(Stratified)KFold`, a CV splitter object, or
        an iterable yielding train/test splits. For integer/None inputs,
        :class:`StratifiedKFold` is used when the estimator is a classifier
        and ``y`` is binary or multiclass; :class:`KFold` otherwise.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls how many jobs get dispatched during parallel execution:
        None (dispatch everything immediately), an int (exact number of
        dispatched jobs), or an expression in n_jobs such as '2*n_jobs'.
    method : string, optional, default: 'predict'
        Invokes the passed method name of the passed estimator.
    Returns
    -------
    predictions : ndarray
        This is the result of calling ``method``.
    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> y_pred = cross_val_predict(lasso, X, y)
    """
    X, y, labels = indexable(X, y, labels)
    checked_cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Fail fast when the estimator cannot honour the requested method.
    if not callable(getattr(estimator, method)):
        raise AttributeError('{} not implemented in estimator'
                             .format(method))
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    # One clone per fold keeps the fits independent and picklable.
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params, method)
        for train, test in checked_cv.split(X, y, labels))
    per_fold_predictions = [block for block, _ in prediction_blocks]
    test_indices = np.concatenate([idx for _, idx in prediction_blocks])
    # Every sample must appear in exactly one test fold.
    if not _check_is_permutation(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')
    # Invert the fold ordering so predictions line up with the input rows.
    inv_test_indices = np.empty(len(test_indices), dtype=int)
    inv_test_indices[test_indices] = np.arange(len(test_indices))
    if sp.issparse(per_fold_predictions[0]):
        stacked = sp.vstack(per_fold_predictions,
                            format=per_fold_predictions[0].format)
    else:
        stacked = np.concatenate(per_fold_predictions)
    return stacked[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
                     method):
    """Fit on the train split, then apply ``method`` to the test split.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    method : string
        Invokes the passed method name of the passed estimator.
    Returns
    -------
    predictions : sequence
        Result of calling 'estimator.method'.
    test : array-like
        This is the value of the test parameter.
    """
    # Subset any sample-aligned fit parameters (e.g. sample_weight) to the
    # training indices before fitting.
    params = {} if fit_params is None else fit_params
    params = {key: _index_param_value(X, value, train)
              for key, value in params.items()}
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)
    # y may legitimately be None for unsupervised estimators.
    if y_train is None:
        estimator.fit(X_train, **params)
    else:
        estimator.fit(X_train, y_train, **params)
    predictions = getattr(estimator, method)(X_test)
    return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing."""
    # Pass through anything that is not a sample-aligned array
    # (scalars, non-arrays, arrays of a different length).
    if not _is_arraylike(v):
        return v
    if _num_samples(v) != _num_samples(X):
        return v
    # CSR supports efficient row indexing; other sparse formats may not.
    value = v.tocsr() if sp.issparse(v) else v
    return safe_indexing(value, indices)
def permutation_test_score(estimator, X, y, labels=None, cv=None,
                           n_permutations=100, n_jobs=1, random_state=0,
                           verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations.
    Read more in the :ref:`User Guide <cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like
        The target variable to try to predict in the case of
        supervised learning.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer
        callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy. Possible inputs:
        None for the default 3-fold cross validation, an integer for the
        number of folds in a `(Stratified)KFold`, a CV splitter object, or
        an iterable yielding train/test splits. For integer/None inputs,
        :class:`StratifiedKFold` is used when the estimator is a classifier
        and ``y`` is binary or multiclass; :class:`KFold` otherwise.
    n_permutations : integer, optional
        Number of times to permute ``y``.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.
    verbose : integer, optional
        The verbosity level.
    Returns
    -------
    score : float
        The true score without permuting targets.
    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.
    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value:  1 - p-value.
    Notes
    -----
    This function implements Test 1 in:
        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance.  The Journal of Machine Learning Research (2010)
        vol. 11
    """
    X, y, labels = indexable(X, y, labels)
    checked_cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)
    # Reference score on the unpermuted targets; each evaluation uses its
    # own clone so the fits stay independent and picklable.
    score = _permutation_test_score(clone(estimator), X, y, labels,
                                    checked_cv, scorer)
    permutation_scores = np.array(Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state),
            labels, checked_cv, scorer)
        for _ in range(n_permutations)))
    # Add-one smoothing keeps the p-value strictly positive.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
                   train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
                   exploit_incremental_learning=False, n_jobs=1,
                   pre_dispatch="all", verbose=0):
    """Learning curve.
    Determines cross-validated training and test scores for different training
    set sizes.
    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.
    Read more in the :ref:`User Guide <learning_curve>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <example_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    cv_iter = cv.split(X, y, labels)
    # Make a list since we will be iterating multiple times over the folds
    cv_iter = list(cv_iter)
    scorer = check_scoring(estimator, scoring=scoring)
    n_max_training_samples = len(cv_iter[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        classes = np.unique(y) if is_classifier(estimator) else None
        # Iterate the materialized cv_iter (not cv.split again) so both
        # branches see exactly the same folds, as the list above intends.
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv_iter)
    else:
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True)
            for train, test in cv_iter
            for n_train_samples in train_sizes_abs)
        # _fit_and_score returns [train_score, test_score, n_test, time];
        # keep only the two scores, grouped by (fold, tick).
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # Final axes: (train/test, tick, fold).
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores = []
    test_scores = []
    # np.split(train, train_sizes)[:-1] yields, for each target size, only
    # the chunk of training indices that is new relative to the previous one.
    for n_train_samples, partial_train in zip(train_sizes,
                                              np.split(train,
                                                       train_sizes)[:-1]):
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial, y_partial = _safe_split(estimator, X, y, partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        # partial_fit sees only the new chunk; scoring uses everything seen
        # so far plus the fixed test split.
        if y_partial is None:
            estimator.partial_fit(X_partial, classes=classes)
        else:
            estimator.partial_fit(X_partial, y_partial,
                                  classes=classes)
        train_scores.append(_score(estimator, X_train, y_train, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
                     cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
                     verbose=0):
    """Validation curve.
    Determine training and test scores for varying parameter values.
    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
    Read more in the :ref:`User Guide <learning_curve>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy. Possible inputs:
        None for the default 3-fold cross validation, an integer for the
        number of folds in a `(Stratified)KFold`, a CV splitter object, or
        an iterable yielding train/test splits. For integer/None inputs,
        :class:`StratifiedKFold` is used when the estimator is a classifier
        and ``y`` is binary or multiclass; :class:`KFold` otherwise.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <example_model_selection_plot_validation_curve.py>`
    """
    X, y, labels = indexable(X, y, labels)
    checked_cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One fit per (fold, parameter value); train scores are collected too.
    out = parallel(
        delayed(_fit_and_score)(estimator, X, y, scorer, train, test, verbose,
                                parameters={param_name: v}, fit_params=None,
                                return_train_score=True)
        for train, test in checked_cv.split(X, y, labels)
        for v in param_range)
    # Keep only [train_score, test_score] from each result row.
    scores = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = scores.shape[0] // n_params
    # Final axes: (train/test, parameter value, fold).
    scores = scores.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return scores[0], scores[1]
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import serial
from threading import Thread
class RawInput:
    """Gets a single character from standard input. Does not echo to the screen."""
    def __init__(self):
        # RawInputWindows imports msvcrt in its constructor; off Windows
        # that raises ImportError, which selects the termios-based fallback.
        try:
            impl = RawInputWindows()
        except ImportError:
            impl = RawInputUnix()
        self.impl = impl
    def __call__(self):
        return self.impl()
class RawInputUnix:
    """Raw single-character reads from stdin via termios on POSIX systems."""
    def __init__(self):
        import tty
        import sys
    def __call__(self):
        import sys
        import termios
        import tty
        fd = sys.stdin.fileno()
        saved_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each byte immediately, without echo.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal state, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
        return ch
class RawInputWindows:
    """Raw single-character reads via msvcrt on Windows consoles."""
    def __init__(self):
        # This import doubles as the platform probe: it raises ImportError
        # off Windows, which RawInput catches to pick the Unix fallback.
        import msvcrt
        self._getch = msvcrt.getch
    def __call__(self):
        return self._getch()
class Baudrate:
    """Serial baudrate detection helper: cycles rates until readable text appears."""
    VERSION = '1.0'
    READ_TIMEOUT = 5  # seconds without valid data before trying the next rate
    # Candidate rates; detection starts at the last entry and steps backwards
    # through the list on timeout (see Detect / NextBaudrate).
    BAUDRATES = [
        # "1200",
        # "1800",
        # "2400",
        # "4800",
        "9600",
        "38400",
        "19200",
        "57600",
        "115200",
    ]
    # Keys that step the rate up/down in manual mode. 'A'/'B' presumably
    # arrive as the final byte of arrow-key escape sequences — verify.
    UPKEYS = ['u', 'U', 'A']
    DOWNKEYS = ['d', 'D', 'B']
    MIN_CHAR_COUNT = 25  # printable chars required before accepting a rate
    # Character classes used to judge whether received data looks like text.
    WHITESPACE = [' ', '\t', '\r', '\n']
    PUNCTUATION = ['.', ',', ':', ';', '?', '!']
    VOWELS = ['a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U']
def __init__(self, port=None, threshold=MIN_CHAR_COUNT, timeout=READ_TIMEOUT, name=None, auto=True, verbose=False):
self.port = port
self.threshold = threshold
self.timeout = timeout
self.name = name
self.auto_detect = auto
self.verbose = verbose
self.index = len(self.BAUDRATES) - 1
self.valid_characters = []
self.ctlc = False
self.thread = None
self._gen_char_list()
def _gen_char_list(self):
c = ' '
while c <= '~':
self.valid_characters.append(c)
c = chr(ord(c) + 1)
for c in self.WHITESPACE:
if c not in self.valid_characters:
self.valid_characters.append(c)
def _print(self, data):
if self.verbose:
sys.stderr.write(data)
    def Open(self):
        # Open the serial port with the configured read timeout, then apply
        # the current table entry (updn=0 leaves the index unchanged).
        self.serial = serial.Serial(self.port, timeout=self.timeout)
        self.NextBaudrate(0)
def NextBaudrate(self, updn):
self.index += updn
if self.index >= len(self.BAUDRATES):
self.index = 0
elif self.index < 0:
self.index = len(self.BAUDRATES) - 1
sys.stderr.write(
'\n\n@@@@@@@@@@@@@@@@@@@@@ Baudrate: %s @@@@@@@@@@@@@@@@@@@@@\n\n' % self.BAUDRATES[self.index])
self.serial.flush()
self.serial.baudrate = self.BAUDRATES[self.index]
self.serial.flush()
def Detect(self):
count = 0
whitespace = 0
punctuation = 0
vowels = 0
start_time = 0
timed_out = False
clear_counters = False
if not self.auto_detect:
self.thread = Thread(None, self.HandleKeypress, None, (self, 1))
self.thread.start()
while True:
if start_time == 0:
start_time = time.time()
byte = self.serial.read(1)
if byte:
if self.auto_detect and byte in self.valid_characters:
if byte in self.WHITESPACE:
whitespace += 1
elif byte in self.PUNCTUATION:
punctuation += 1
elif byte in self.VOWELS:
vowels += 1
count += 1
else:
clear_counters = True
self._print(byte)
if count >= self.threshold and whitespace > 0 and punctuation > 0 and vowels > 0:
break
elif (time.time() - start_time) >= self.timeout:
timed_out = True
else:
timed_out = True
if timed_out and self.auto_detect:
start_time = 0
self.NextBaudrate(-1)
clear_counters = True
timed_out = False
if clear_counters:
whitespace = 0
punctuation = 0
vowels = 0
count = 0
clear_counters = False
if self.ctlc:
break
self._print("\n")
return self.BAUDRATES[self.index]
def HandleKeypress(self, *args):
userinput = RawInput()
while not self.ctlc:
c = userinput()
if c in self.UPKEYS:
self.NextBaudrate(1)
elif c in self.DOWNKEYS:
self.NextBaudrate(-1)
elif c == '\x03':
self.ctlc = True
def MinicomConfig(self, name=None):
success = True
if name is None:
name = self.name
config = "########################################################################\n"
config += "# Minicom configuration file - use \"minicom -s\" to change parameters.\n"
config += "pu port %s\n" % self.port
config += "pu baudrate %s\n" % self.BAUDRATES[self.index]
config += "pu bits 8\n"
config += "pu parity N\n"
config += "pu stopbits 1\n"
config += "pu rtscts No\n"
config += "########################################################################\n"
if name is not None and name:
try:
open("/etc/minicom/minirc.%s" % name, "w").write(config)
except Exception, e:
print "Error saving minicom config file:", str(e)
success = False
return (success, config)
def Close(self):
self.ctlc = True
self.serial.close()
if __name__ == '__main__':
    import subprocess
    from getopt import getopt as GetOpt, GetoptError

    def usage():
        # Print CLI help and exit with a non-zero status.
        baud = Baudrate()

        print ""
        print "Baudrate v%s" % baud.VERSION
        print "Craig Heffner, http://www.devttys0.com"
        print ""
        print "Usage: %s [OPTIONS]" % sys.argv[0]
        print ""
        print "\t-p <serial port> Specify the serial port to use [/dev/ttyUSB0]"
        print "\t-t <seconds> Set the timeout period used when switching baudrates in auto detect mode [%d]" % baud.READ_TIMEOUT
        print "\t-c <num> Set the minimum ASCII character threshold used during auto detect mode [%d]" % baud.MIN_CHAR_COUNT
        print "\t-n <name> Save the resulting serial configuration as <name> and automatically invoke minicom (implies -a)"
        print "\t-a Enable auto detect mode"
        print "\t-b Display supported baud rates and exit"
        print "\t-q Do not display data read from the serial port"
        print "\t-h Display help"
        print ""

        sys.exit(1)

    def main():
        # Option defaults; see usage() for meanings.
        display = False
        verbose = True
        auto = False
        run = False
        threshold = 25
        timeout = 5
        name = None
        port = '/dev/ttyUSB0'

        try:
            (opts, args) = GetOpt(sys.argv[1:], 'p:t:c:n:abqh')
        except GetoptError, e:
            print e
            usage()

        for opt, arg in opts:
            if opt == '-t':
                timeout = int(arg)
            elif opt == '-c':
                threshold = int(arg)
            elif opt == '-p':
                port = arg
            elif opt == '-n':
                # -n implies auto detection and launching minicom afterwards.
                name = arg
                auto = True
                run = True
            elif opt == '-a':
                auto = True
            elif opt == '-b':
                display = True
            elif opt == '-q':
                verbose = False
            else:
                usage()

        baud = Baudrate(port, threshold=threshold,
                        timeout=timeout, name=name, verbose=verbose, auto=auto)

        if display:
            # -b: just list the supported rates and exit.
            print ""
            for rate in baud.BAUDRATES:
                print "\t%s" % rate
            print ""
        else:
            print ""
            print "Starting baudrate detection on %s, turn on your serial device now." % port
            print "Press Ctl+C to quit."
            print ""

            baud.Open()

            try:
                rate = baud.Detect()
                print "\nDetected baudrate: %s" % rate

                if name is None:
                    # Prompt interactively for a minicom config name.
                    print "\nSave minicom configuration as: ",
                    name = sys.stdin.readline().strip()
                    print ""

                (ok, config) = baud.MinicomConfig(name)

                if name and name is not None:
                    if ok:
                        if not run:
                            print "Configuration saved. Run minicom now [n/Y]? ",
                            yn = sys.stdin.readline().strip()
                            print ""
                            if yn == "" or yn.lower().startswith('y'):
                                run = True
                        if run:
                            subprocess.call(["minicom", name])
                    else:
                        # Saving failed: show the config so it can be saved by hand.
                        print config
                else:
                    print config
            except KeyboardInterrupt:
                pass

            baud.Close()

    main()
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utility code to translate between python objects and AMQP encoded data
fields.
"""
from cStringIO import StringIO
from struct import pack, calcsize, unpack
class EOF(Exception):
    """Raised by Codec.read() when the underlying stream has no more data."""
    pass
class Codec(object):
    """Encode and decode AMQP wire-format data fields over a file-like stream."""

    def __init__(self, stream):
        self.stream = stream
        self.nwrote = 0           # total bytes written so far
        self.nread = 0            # total bytes read so far
        self.incoming_bits = []   # bits decoded from an octet, not yet consumed
        self.outgoing_bits = []   # bits queued for packing into octets on flush

    def read(self, n):
        """Read up to n bytes; raise EOF when the stream is exhausted."""
        data = self.stream.read(n)
        if n > 0 and len(data) == 0:
            raise EOF()
        self.nread += len(data)
        return data

    def write(self, s):
        # Any queued bit fields must be packed out before raw bytes follow.
        self.flushbits()
        self.stream.write(s)
        self.nwrote += len(s)

    def flush(self):
        self.flushbits()
        self.stream.flush()

    def flushbits(self):
        # Pack queued bits into octets, eight per byte, least significant
        # bit first (mirrors decode_bit's unpacking order).
        if len(self.outgoing_bits) > 0:
            bytes = []
            index = 0
            for b in self.outgoing_bits:
                if index == 0: bytes.append(0)
                if b: bytes[-1] |= 1 << index
                index = (index + 1) % 8
            del self.outgoing_bits[:]
            for byte in bytes:
                self.encode_octet(byte)

    def pack(self, fmt, *args):
        self.write(pack(fmt, *args))

    def unpack(self, fmt):
        size = calcsize(fmt)
        data = self.read(size)
        values = unpack(fmt, data)
        # Unwrap single-value tuples for caller convenience.
        if len(values) == 1:
            return values[0]
        else:
            return values

    def encode(self, type, value):
        # Dispatch to the matching encode_<type> method.
        getattr(self, "encode_" + type)(value)

    def decode(self, type):
        # Dispatch to the matching decode_<type> method.
        return getattr(self, "decode_" + type)()

    # bit
    def encode_bit(self, o):
        # Bits are buffered and packed into octets on the next flush/write.
        if o:
            self.outgoing_bits.append(True)
        else:
            self.outgoing_bits.append(False)

    def decode_bit(self):
        # Refill the bit buffer from the next octet, LSB first.
        if len(self.incoming_bits) == 0:
            bits = self.decode_octet()
            for i in range(8):
                self.incoming_bits.append(bits >> i & 1 != 0)
        return self.incoming_bits.pop(0)

    # octet
    def encode_octet(self, o):
        self.pack("!B", o)

    def decode_octet(self):
        return self.unpack("!B")

    # short
    def encode_short(self, o):
        self.pack("!H", o)

    def decode_short(self):
        return self.unpack("!H")

    # long
    def encode_long(self, o):
        self.pack("!L", o)

    def decode_long(self):
        return self.unpack("!L")

    # longlong
    def encode_longlong(self, o):
        self.pack("!Q", o)

    def decode_longlong(self):
        return self.unpack("!Q")

    def enc_str(self, fmt, s):
        # Length-prefixed string: size packed with fmt, then the raw bytes.
        size = len(s)
        self.pack(fmt, size)
        self.write(s)

    def dec_str(self, fmt):
        size = self.unpack(fmt)
        return self.read(size)

    # shortstr (1-byte length prefix)
    def encode_shortstr(self, s):
        self.enc_str("!B", s)

    def decode_shortstr(self):
        return self.dec_str("!B")

    # longstr (4-byte length prefix)
    def encode_longstr(self, s):
        # Dicts are encoded as AMQP field tables; anything else as a string.
        if isinstance(s, dict):
            self.encode_table(s)
        else:
            self.enc_str("!L", s)

    def decode_longstr(self):
        return self.dec_str("!L")

    # timestamp (64-bit)
    def encode_timestamp(self, o):
        self.pack("!Q", o)

    def decode_timestamp(self):
        return self.unpack("!Q")

    # table
    def encode_table(self, tbl):
        # Encode into a scratch buffer first so the table's total byte
        # length can be written before its contents.
        enc = StringIO()
        codec = Codec(enc)
        for key, value in tbl.items():
            codec.encode_shortstr(key)
            if isinstance(value, basestring):
                codec.write("S")     # 'S' tag: long-string value
                codec.encode_longstr(value)
            else:
                codec.write("I")     # 'I' tag: 32-bit integer value
                codec.encode_long(value)
        s = enc.getvalue()
        self.encode_long(len(s))
        self.write(s)

    def decode_table(self):
        size = self.decode_long()
        start = self.nread
        result = {}
        # Consume exactly `size` bytes worth of key/tag/value triples.
        while self.nread - start < size:
            key = self.decode_shortstr()
            type = self.read(1)
            if type == "S":
                value = self.decode_longstr()
            elif type == "I":
                value = self.decode_long()
            elif type == "F":
                # Nested field table.
                value = self.decode_table()
            else:
                raise ValueError(repr(type))
            result[key] = value
        return result
def test(type, value):
    """Round-trip *value* (or a sequence of values) through a Codec and
    raise AssertionError if decoding does not reproduce the input."""
    values = value if isinstance(value, (list, tuple)) else [value]

    stream = StringIO()
    codec = Codec(stream)
    for item in values:
        codec.encode(type, item)
    codec.flush()

    enc = stream.getvalue()
    stream.reset()

    dup = [codec.decode(type) for _ in xrange(len(values))]
    if values != dup:
        raise AssertionError("%r --> %r --> %r" % (values, enc, dup))
if __name__ == "__main__":
    def dotest(type, value):
        # Thin wrapper so a failing call shows (type, value) in the traceback.
        args = (type, value)
        test(*args)

    # Bit fields: repeat each pattern 0-9 times to exercise octet packing.
    for value in ("1", "0", "110", "011", "11001", "10101", "10011"):
        for i in range(10):
            dotest("bit", map(lambda x: x == "1", value*i))

    # Field tables (string and integer values, including empty).
    for value in ({}, {"asdf": "fdsa", "fdsa": 1, "three": 3}, {"one": 1}):
        dotest("table", value)

    # Fixed-width integers at every single-octet value.
    for type in ("octet", "short", "long", "longlong"):
        for value in range(0, 256):
            dotest(type, value)

    # Length-prefixed strings.
    for type in ("shortstr", "longstr"):
        for value in ("", "a", "asdf"):
            dotest(type, value)
| |
#!/usr/bin/python
#
# Copyright (c) 2009, Whispersoft s.r.l.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Whispersoft s.r.l. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Cosmin Tudorache
#
import sys
import os
import filecmp
from string import Template
import datetime
import shutil
import atoms
# C++ header template for an auto-generated "raw" F4V atom class; the
# ${atom}/${Atom}/${ATOM} placeholders are filled per atom name in
# GenerateF4vAtoms().
template_h = Template("""
//
// WARNING !! This is Auto-Generated code by generate_atoms.py !!!
// do not modify this code
//
#ifndef __MEDIA_F4V_ATOMS_MOVIE_${ATOM}_ATOM_H__
#define __MEDIA_F4V_ATOMS_MOVIE_${ATOM}_ATOM_H__
#include <string>
#include <whisperlib/common/io/buffer/memory_stream.h>
#include <whisperstreamlib/f4v/atoms/base_atom.h>
namespace streaming {
namespace f4v {
class ${Atom}Atom : public BaseAtom {
public:
static const AtomType kType = ATOM_${ATOM};
public:
${Atom}Atom();
${Atom}Atom(const ${Atom}Atom& other);
virtual ~${Atom}Atom();
///////////////////////////////////////////////////////////////////////////
// Methods from BaseAtom
virtual bool EqualsBody(const BaseAtom& other) const;
virtual void GetSubatoms(vector<const BaseAtom*>& subatoms) const;
virtual BaseAtom* Clone() const;
virtual TagDecodeStatus DecodeBody(uint64 size,
io::MemoryStream& in,
Decoder& decoder);
virtual void EncodeBody(io::MemoryStream& out, Encoder& encoder) const;
virtual uint64 MeasureBodySize() const;
virtual string ToStringBody(uint32 indent) const;
private:
io::MemoryStream raw_data_;
};
}
}
#endif // __MEDIA_F4V_ATOMS_MOVIE_${ATOM}_ATOM_H__
""")

# Matching C++ implementation template: the atom body is kept verbatim in
# raw_data_ and copied back out on encode.
template_cc = Template("""
//
// WARNING !! This is Auto-Generated code by generate_atoms.py !!!
// do not modify this code
//
#include <whisperstreamlib/f4v/atoms/movie/auto/${atom}_atom.h>
namespace streaming {
namespace f4v {
${Atom}Atom::${Atom}Atom()
: BaseAtom(kType),
raw_data_() {
}
${Atom}Atom::${Atom}Atom(const ${Atom}Atom& other)
: BaseAtom(other),
raw_data_() {
raw_data_.AppendStreamNonDestructive(&other.raw_data_);
}
${Atom}Atom::~${Atom}Atom() {
}
bool ${Atom}Atom::EqualsBody(const BaseAtom& other) const {
const ${Atom}Atom& a = static_cast<const ${Atom}Atom&>(other);
return raw_data_.Equals(a.raw_data_);
}
void ${Atom}Atom::GetSubatoms(vector<const BaseAtom*>& subatoms) const {
}
BaseAtom* ${Atom}Atom::Clone() const {
return new ${Atom}Atom(*this);
}
TagDecodeStatus ${Atom}Atom::DecodeBody(uint64 size,
io::MemoryStream& in,
Decoder& decoder) {
if ( in.Size() < size ) {
DATOMLOG << "Not enough data in stream: " << in.Size()
<< " is less than expected: " << size;
return TAG_DECODE_NO_DATA;
}
raw_data_.AppendStream(&in, size);
return TAG_DECODE_SUCCESS;
}
void ${Atom}Atom::EncodeBody(io::MemoryStream& out, Encoder& encoder) const {
out.AppendStreamNonDestructive(&raw_data_);
}
uint64 ${Atom}Atom::MeasureBodySize() const {
return raw_data_.Size();
}
string ${Atom}Atom::ToStringBody(uint32 indent) const {
ostringstream oss;
oss << "raw_data_: " << raw_data_.Size() << " bytes";
return oss.str();
}
}
}
""")

# Sub-directory names (created next to the generated files) holding the
# pristine-copy cache and the timestamped backups.
cache = "cache"
backup = "backup"
# [IN] path: string, directory to list
# [OUT] out_dirs: list, receives the list of directories
# [OUT] out_files: list, receives the list of files
def ListDir(path, out_dirs, out_files):
    """Append the full paths of *path*'s subdirectories and files to
    out_dirs / out_files respectively, then sort both lists in place.
    Entries that are neither regular files nor directories are ignored."""
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.isfile(full_path):
            out_files.append(full_path)
        elif os.path.isdir(full_path):
            out_dirs.append(full_path)
    out_dirs.sort()
    out_files.sort()
def Cache(filename):
    """Return the path of *filename*'s mirror inside its cache subdirectory."""
    directory = os.path.dirname(filename)
    base = os.path.basename(filename)
    return os.path.join(directory, cache, base)
def CacheDir(output_dir):
    """Return the cache subdirectory under *output_dir*."""
    return os.path.join(output_dir, cache)
def Backup(filename):
    """Return the path of *filename*'s mirror inside its backup subdirectory."""
    directory = os.path.dirname(filename)
    base = os.path.basename(filename)
    return os.path.join(directory, backup, base)
def BackupDir(output_dir):
    """Return the backup subdirectory under *output_dir*."""
    return os.path.join(output_dir, backup)
def IsModified(filename):
    """Return True if *filename* differs from its cached pristine copy.

    A missing cache copy counts as modified; a missing original file does not
    (so freshly generated files are never treated as hand-edited).
    """
    if not os.path.exists(filename):
        return False
    cache_file = Cache(filename)
    if not os.path.exists(cache_file):
        print "Cache file: [%s] not found" % cache_file
        return True
    if not filecmp.cmp(filename, cache_file):
        print "File [%s] has been modified" % filename
        return True
    return False
def Mkdir(dirname):
    # Create *dirname* if it does not exist; an existing directory is fine.
    if not os.path.isdir(dirname):
        print "Creating directory: %s" % dirname
        os.mkdir(dirname)
    else:
        print "Directory: [%s] already exists" % dirname
def BackupFile(filename):
    """Copy *filename* into the backup dir with a ".<timestamp>" suffix.

    Does nothing when the file does not exist yet (first generation).
    """
    if not os.path.exists(filename):
        return
    today = datetime.datetime.today()
    backfile = Backup(filename) + "." + today.strftime("%Y-%m-%d_%H-%M-%S")
    shutil.copyfile(filename, backfile)
    print "Backup file: %s" % backfile
def DeleteOldBackup(output_dir, keep_last_n_versions):
    """Prune surplus timestamped backups under *output_dir*'s backup dir.

    NOTE(review): baks is sorted ascending, i.e. oldest timestamp first, so
    the versions *kept* per original file are the oldest ones and newer
    extras get unlinked -- that looks contrary to the parameter name
    ("keep_last_n_versions"); confirm the intended direction.
    """
    baks = []
    bakdirs = []
    ListDir(BackupDir(output_dir), bakdirs, baks)
    baks.sort()
    last_file_base = ''
    same_file = 0
    removed_count = 0
    for b in baks:
        bbase = os.path.basename(b)
        # Strip the trailing ".<timestamp>" extension to recover the original
        # file name (the timestamp format contains no dots).
        orig = os.path.splitext(bbase)[0]
        if ( last_file_base != orig ):
            # First backup seen for this original file.
            last_file_base = orig
            same_file = 1
            continue
        same_file += 1
        if ( same_file > keep_last_n_versions ):
            removed_count += 1
            os.unlink(b)
    print "DeleteOldBackup removed %s files" % removed_count
def CacheFile(filename):
    """Refresh the cached pristine copy of *filename* (see IsModified)."""
    cachefile = Cache(filename)
    shutil.copyfile(filename, cachefile)
    print "Cache file: %s" % cachefile
def GenerateF4vAtoms(output_dir, all_atoms, autogen):
    """Generate <atom>_atom.h/.cc source files for every atom name.

    output_dir: destination directory for the generated sources.
    all_atoms:  generate for atoms.atoms instead of just atoms.autogen_atoms.
    autogen:    overwrite unconditionally, skipping the cache/backup machinery
                that protects hand-edited files.
    """
    print "Generating atoms in dir: %s" % output_dir
    if not autogen:
        Mkdir(CacheDir(output_dir))
        Mkdir(BackupDir(output_dir))
    src = atoms.autogen_atoms
    if all_atoms:
        src = atoms.atoms
    for a in src:
        print "Checking atom: %s" % a
        # Three capitalizations feed the ${atom}/${Atom}/${ATOM} placeholders.
        atomname = a.lower()
        AtomName = a[0].upper() + a[1:].lower()
        ATOMNAME = a.upper()
        file_dict = { 'atom' : atomname,
                      'Atom' : AtomName,
                      'ATOM' : ATOMNAME
                      }
        data_h = template_h.substitute(file_dict)
        data_cc = template_cc.substitute(file_dict)
        filename_h = os.path.join(output_dir, a.lower() + "_atom.h")
        filename_cc = os.path.join(output_dir, a.lower() + "_atom.cc")
        if autogen:
            open(filename_h, "w").write(data_h)
            open(filename_cc, "w").write(data_cc)
        else:
            # Seed the cache on first generation so later runs can detect
            # hand edits via IsModified().
            if not os.path.exists(Cache(filename_h)):
                open(Cache(filename_h), "w").write(data_h)
            if not os.path.exists(Cache(filename_cc)):
                open(Cache(filename_cc), "w").write(data_cc)
            if IsModified(filename_h) or IsModified(filename_cc):
                # Never clobber files that were edited by hand.
                print "Skip atom %s because of existing modified file" % a
                continue
            BackupFile(filename_h)
            open(filename_h, "w").write(data_h)
            CacheFile(filename_h)
            BackupFile(filename_cc)
            open(filename_cc, "w").write(data_cc)
            CacheFile(filename_cc)
        print "Generated files for atom: %s" % a
    if not autogen:
        DeleteOldBackup(output_dir, 10)
def main():
    """CLI entry point: generate_atoms.py <output_dir> [all_atoms|autogen]"""
    all_atoms = False
    autogen = False
    if len(sys.argv) > 1:
        output_dir = sys.argv[1]
        # The optional second argument selects the generation mode.
        if len(sys.argv) > 2 and sys.argv[2] == 'all_atoms':
            all_atoms = True
        if len(sys.argv) > 2 and sys.argv[2] == 'autogen':
            autogen = True
    else:
        print "Usage %s <output_dir>" % sys.argv[0]
        return
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    GenerateF4vAtoms(output_dir, all_atoms, autogen)
# Script entry point.
if __name__ == "__main__":
    main()
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1TokenReview(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> swagger type name (consumed by to_dict()).
    swagger_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1TokenReviewSpec',
        'status': 'V1TokenReviewStatus'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """
        V1TokenReview - a model defined in Swagger
        """
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        # spec is required: assigning through the property setter enforces
        # that it is not None.
        self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """
        Gets the api_version of this V1TokenReview.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

        :return: The api_version of this V1TokenReview.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1TokenReview.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1TokenReview.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """
        Gets the kind of this V1TokenReview.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

        :return: The kind of this V1TokenReview.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1TokenReview.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1TokenReview.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1TokenReview.

        :return: The metadata of this V1TokenReview.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1TokenReview.

        :param metadata: The metadata of this V1TokenReview.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """
        Gets the spec of this V1TokenReview.
        Spec holds information about the request being evaluated

        :return: The spec of this V1TokenReview.
        :rtype: V1TokenReviewSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1TokenReview.
        Spec holds information about the request being evaluated

        :param spec: The spec of this V1TokenReview.
        :type: V1TokenReviewSpec
        """
        # spec is a required field in the API schema.
        if spec is None:
            raise ValueError("Invalid value for `spec`, must not be `None`")

        self._spec = spec

    @property
    def status(self):
        """
        Gets the status of this V1TokenReview.
        Status is filled in by the server and indicates whether the request can be authenticated.

        :return: The status of this V1TokenReview.
        :rtype: V1TokenReviewStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this V1TokenReview.
        Status is filled in by the server and indicates whether the request can be authenticated.

        :param status: The status of this V1TokenReview.
        :type: V1TokenReviewStatus
        """
        self._status = status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models/lists/dicts via their to_dict().
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1TokenReview):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
# -*- coding: utf-8 -*-
import numpy as np
from ..Qt import QtGui, QtCore
from ..python2_3 import asUnicode, basestring
from .. import metaarray
__all__ = ['TableWidget']
def _defersort(fn):
    """Decorator for TableWidget methods that mutate many cells at once.

    Sorting is disabled for the duration of the outermost decorated call
    (nested/recursive decorated calls leave it alone, tracked via
    ``self._sorting``) and the previous sorting state is restored
    afterwards, so rows are not re-sorted after every individual change.
    """
    from functools import wraps

    @wraps(fn)  # fix: preserve the wrapped method's name and docstring
    def defersort(self, *args, **kwds):
        # may be called recursively; only the first call needs to block sorting
        setSorting = False
        if self._sorting is None:
            self._sorting = self.isSortingEnabled()
            setSorting = True
            self.setSortingEnabled(False)
        try:
            return fn(self, *args, **kwds)
        finally:
            # Restore only in the outermost call, even if fn raised.
            if setSorting:
                self.setSortingEnabled(self._sorting)
                self._sorting = None
    return defersort
class TableWidget(QtGui.QTableWidget):
"""Extends QTableWidget with some useful functions for automatic data handling
and copy / export context menu. Can automatically format and display a variety
of data types (see :func:`setData() <pyqtgraph.TableWidget.setData>` for more
information.
"""
def __init__(self, *args, **kwds):
"""
All positional arguments are passed to QTableWidget.__init__().
===================== =================================================
**Keyword Arguments**
editable (bool) If True, cells in the table can be edited
by the user. Default is False.
sortable (bool) If True, the table may be soted by
clicking on column headers. Note that this also
causes rows to appear initially shuffled until
a sort column is selected. Default is True.
*(added in version 0.9.9)*
===================== =================================================
"""
QtGui.QTableWidget.__init__(self, *args)
self.itemClass = TableWidgetItem
self.setVerticalScrollMode(self.ScrollPerPixel)
self.setSelectionMode(QtGui.QAbstractItemView.ContiguousSelection)
self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
self.clear()
kwds.setdefault('sortable', True)
kwds.setdefault('editable', False)
self.setEditable(kwds.pop('editable'))
self.setSortingEnabled(kwds.pop('sortable'))
if len(kwds) > 0:
raise TypeError("Invalid keyword arguments '%s'" % kwds.keys())
self._sorting = None # used when temporarily disabling sorting
self._formats = {None: None} # stores per-column formats and entire table format
self.sortModes = {} # stores per-column sort mode
self.itemChanged.connect(self.handleItemChanged)
self.contextMenu = QtGui.QMenu()
self.contextMenu.addAction('Copy Selection').triggered.connect(self.copySel)
self.contextMenu.addAction('Copy All').triggered.connect(self.copyAll)
self.contextMenu.addAction('Save Selection').triggered.connect(self.saveSel)
self.contextMenu.addAction('Save All').triggered.connect(self.saveAll)
def clear(self):
"""Clear all contents from the table."""
QtGui.QTableWidget.clear(self)
self.verticalHeadersSet = False
self.horizontalHeadersSet = False
self.items = []
self.setRowCount(0)
self.setColumnCount(0)
self.sortModes = {}
def setData(self, data):
"""Set the data displayed in the table.
Allowed formats are:
* numpy arrays
* numpy record arrays
* metaarrays
* list-of-lists [[1,2,3], [4,5,6]]
* dict-of-lists {'x': [1,2,3], 'y': [4,5,6]}
* list-of-dicts [{'x': 1, 'y': 4}, {'x': 2, 'y': 5}, ...]
"""
self.clear()
self.appendData(data)
self.resizeColumnsToContents()
@_defersort
def appendData(self, data):
"""
Add new rows to the table.
See :func:`setData() <pyqtgraph.TableWidget.setData>` for accepted
data types.
"""
startRow = self.rowCount()
fn0, header0 = self.iteratorFn(data)
if fn0 is None:
self.clear()
return
it0 = fn0(data)
try:
first = next(it0)
except StopIteration:
return
fn1, header1 = self.iteratorFn(first)
if fn1 is None:
self.clear()
return
firstVals = [x for x in fn1(first)]
self.setColumnCount(len(firstVals))
if not self.verticalHeadersSet and header0 is not None:
labels = [self.verticalHeaderItem(i).text() for i in range(self.rowCount())]
self.setRowCount(startRow + len(header0))
self.setVerticalHeaderLabels(labels + header0)
self.verticalHeadersSet = True
if not self.horizontalHeadersSet and header1 is not None:
self.setHorizontalHeaderLabels(header1)
self.horizontalHeadersSet = True
i = startRow
self.setRow(i, firstVals)
for row in it0:
i += 1
self.setRow(i, [x for x in fn1(row)])
if self._sorting and self.horizontalHeader().sortIndicatorSection() >= self.columnCount():
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
def setEditable(self, editable=True):
self.editable = editable
for item in self.items:
item.setEditable(editable)
def setFormat(self, format, column=None):
"""
Specify the default text formatting for the entire table, or for a
single column if *column* is specified.
If a string is specified, it is used as a format string for converting
float values (and all other types are converted using str). If a
function is specified, it will be called with the item as its only
argument and must return a string. Setting format = None causes the
default formatter to be used instead.
Added in version 0.9.9.
"""
if format is not None and not isinstance(format, basestring) and not callable(format):
raise ValueError("Format argument must string, callable, or None. (got %s)" % format)
self._formats[column] = format
if column is None:
# update format of all items that do not have a column format
# specified
for c in range(self.columnCount()):
if self._formats.get(c, None) is None:
for r in range(self.rowCount()):
item = self.item(r, c)
if item is None:
continue
item.setFormat(format)
else:
# set all items in the column to use this format, or the default
# table format if None was specified.
if format is None:
format = self._formats[None]
for r in range(self.rowCount()):
item = self.item(r, column)
if item is None:
continue
item.setFormat(format)
def iteratorFn(self, data):
## Return 1) a function that will provide an iterator for data and 2) a list of header strings
if isinstance(data, list) or isinstance(data, tuple):
return lambda d: d.__iter__(), None
elif isinstance(data, dict):
return lambda d: iter(d.values()), list(map(asUnicode, data.keys()))
elif (hasattr(data, 'implements') and data.implements('MetaArray')):
if data.axisHasColumns(0):
header = [asUnicode(data.columnName(0, i)) for i in range(data.shape[0])]
elif data.axisHasValues(0):
header = list(map(asUnicode, data.xvals(0)))
else:
header = None
return self.iterFirstAxis, header
elif isinstance(data, np.ndarray):
return self.iterFirstAxis, None
elif isinstance(data, np.void):
return self.iterate, list(map(asUnicode, data.dtype.names))
elif data is None:
return (None,None)
else:
msg = "Don't know how to iterate over data type: {!s}".format(type(data))
raise TypeError(msg)
def iterFirstAxis(self, data):
for i in range(data.shape[0]):
yield data[i]
def iterate(self, data):
# for numpy.void, which can be iterated but mysteriously
# has no __iter__ (??)
for x in data:
yield x
def appendRow(self, data):
self.appendData([data])
@_defersort
def addRow(self, vals):
row = self.rowCount()
self.setRowCount(row + 1)
self.setRow(row, vals)
@_defersort
def setRow(self, row, vals):
if row > self.rowCount() - 1:
self.setRowCount(row + 1)
for col in range(len(vals)):
val = vals[col]
item = self.itemClass(val, row)
item.setEditable(self.editable)
sortMode = self.sortModes.get(col, None)
if sortMode is not None:
item.setSortMode(sortMode)
format = self._formats.get(col, self._formats[None])
item.setFormat(format)
self.items.append(item)
self.setItem(row, col, item)
item.setValue(val) # Required--the text-change callback is invoked
# when we call setItem.
def setSortMode(self, column, mode):
"""
Set the mode used to sort *column*.
============== ========================================================
**Sort Modes**
value Compares item.value if available; falls back to text
comparison.
text Compares item.text()
index Compares by the order in which items were inserted.
============== ========================================================
Added in version 0.9.9
"""
for r in range(self.rowCount()):
item = self.item(r, column)
if hasattr(item, 'setSortMode'):
item.setSortMode(mode)
self.sortModes[column] = mode
def sizeHint(self):
# based on http://stackoverflow.com/a/7195443/54056
width = sum(self.columnWidth(i) for i in range(self.columnCount()))
width += self.verticalHeader().sizeHint().width()
width += self.verticalScrollBar().sizeHint().width()
width += self.frameWidth() * 2
height = sum(self.rowHeight(i) for i in range(self.rowCount()))
height += self.verticalHeader().sizeHint().height()
height += self.horizontalScrollBar().sizeHint().height()
return QtCore.QSize(width, height)
def serialize(self, useSelection=False):
"""Convert entire table (or just selected area) into tab-separated text values"""
if useSelection:
selection = self.selectedRanges()[0]
rows = list(range(selection.topRow(),
selection.bottomRow() + 1))
columns = list(range(selection.leftColumn(),
selection.rightColumn() + 1))
else:
rows = list(range(self.rowCount()))
columns = list(range(self.columnCount()))
data = []
if self.horizontalHeadersSet:
row = []
if self.verticalHeadersSet:
row.append(asUnicode(''))
for c in columns:
row.append(asUnicode(self.horizontalHeaderItem(c).text()))
data.append(row)
for r in rows:
row = []
if self.verticalHeadersSet:
row.append(asUnicode(self.verticalHeaderItem(r).text()))
for c in columns:
item = self.item(r, c)
if item is not None:
row.append(asUnicode(item.value))
else:
row.append(asUnicode(''))
data.append(row)
s = ''
for row in data:
s += ('\t'.join(row) + '\n')
return s
def copySel(self):
    """Copy selected data to clipboard."""
    tsv = self.serialize(useSelection=True)
    QtGui.QApplication.clipboard().setText(tsv)
def copyAll(self):
    """Copy all data to clipboard."""
    tsv = self.serialize(useSelection=False)
    QtGui.QApplication.clipboard().setText(tsv)
def saveSel(self):
    """Save selected data to file."""
    tsv = self.serialize(useSelection=True)
    self.save(tsv)
def saveAll(self):
    """Save all data to file."""
    tsv = self.serialize(useSelection=False)
    self.save(tsv)
def save(self, data):
    """Prompt the user for a filename and write *data* (TSV text) to it.

    Does nothing if the user cancels the file dialog.
    """
    fileName = QtGui.QFileDialog.getSaveFileName(self, "Save As..", "", "Tab-separated values (*.tsv)")
    if fileName == '':
        # Dialog was cancelled.
        return
    # Use a context manager so the handle is closed even if write() raises;
    # the original open(fileName, 'w').write(data) leaked the file handle.
    with open(fileName, 'w') as fh:
        fh.write(data)
def contextMenuEvent(self, ev):
    """Show the table's context menu at the event's global position."""
    self.contextMenu.popup(ev.globalPos())
def keyPressEvent(self, ev):
    """Intercept Ctrl+C to copy the current selection; defer everything
    else to the base-class handler."""
    isCopyShortcut = (ev.key() == QtCore.Qt.Key_C
                      and ev.modifiers() == QtCore.Qt.ControlModifier)
    if not isCopyShortcut:
        QtGui.QTableWidget.keyPressEvent(self, ev)
        return
    ev.accept()
    self.copySel()
def handleItemChanged(self, item):
    """Forward the table's itemChanged signal to the item itself."""
    item.itemChanged()
class TableWidgetItem(QtGui.QTableWidgetItem):
    """Table cell item that keeps a reference to its original Python value.

    The displayed text is derived from the value via a configurable format
    (see setFormat); sorting may compare by value, text, or insertion index.
    """
    def __init__(self, val, index, format=None):
        QtGui.QTableWidgetItem.__init__(self, '')
        self._blockValueChange = False  # guards against setText() feedback
        self._format = None
        self._defaultFormat = '%0.3g'
        self.sortMode = 'value'
        self.index = index  # insertion order; used when sortMode == 'index'
        flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        self.setFlags(flags)
        self.setValue(val)
        self.setFormat(format)

    def setEditable(self, editable):
        """
        Set whether this item is user-editable.
        """
        if editable:
            self.setFlags(self.flags() | QtCore.Qt.ItemIsEditable)
        else:
            self.setFlags(self.flags() & ~QtCore.Qt.ItemIsEditable)

    def setSortMode(self, mode):
        """
        Set the mode used to sort this item against others in its column.

        ============== ========================================================
        **Sort Modes**
        value          Compares item.value if available; falls back to text
                       comparison.
        text           Compares item.text()
        index          Compares by the order in which items were inserted.
        ============== ========================================================
        """
        modes = ('value', 'text', 'index', None)
        if mode not in modes:
            raise ValueError('Sort mode must be one of %s' % str(modes))
        self.sortMode = mode

    def setFormat(self, fmt):
        """Define the conversion from item value to displayed text.

        If a string is specified, it is used as a format string for converting
        float values (and all other types are converted using str). If a
        function is specified, it will be called with the item as its only
        argument and must return a string.

        Added in version 0.9.9.
        """
        if fmt is not None and not isinstance(fmt, basestring) and not callable(fmt):
            raise ValueError("Format argument must string, callable, or None. (got %s)" % fmt)
        self._format = fmt
        self._updateText()

    def _updateText(self):
        # Re-render display text from self.value. _blockValueChange keeps the
        # resulting textChanged() from being treated as a user edit.
        self._blockValueChange = True
        try:
            self._text = self.format()
            self.setText(self._text)
        finally:
            self._blockValueChange = False

    def setValue(self, value):
        self.value = value
        self._updateText()

    def itemChanged(self):
        """Called when the data of this item has changed."""
        if self.text() != self._text:
            self.textChanged()

    def textChanged(self):
        """Called when this item's text has changed for any reason."""
        self._text = self.text()
        if self._blockValueChange:
            # text change was result of value or format change; do not
            # propagate.
            return
        # (Removed leftover debugging code that printed the new text and a
        # full stack trace on every user edit.)
        try:
            self.value = type(self.value)(self.text())
        except ValueError:
            self.value = str(self.text())

    def format(self):
        # Render self.value as text: custom callable wins, then float
        # formatting, then plain unicode conversion.
        if callable(self._format):
            return self._format(self)
        if isinstance(self.value, (float, np.floating)):
            if self._format is None:
                return self._defaultFormat % self.value
            else:
                return self._format % self.value
        else:
            return asUnicode(self.value)

    def __lt__(self, other):
        if self.sortMode == 'index' and hasattr(other, 'index'):
            return self.index < other.index
        if self.sortMode == 'value' and hasattr(other, 'value'):
            return self.value < other.value
        else:
            return self.text() < other.text()
if __name__ == '__main__':
    # Interactive demo: show a TableWidget fed with a couple of the input
    # structures that setData() accepts.
    app = QtGui.QApplication([])
    win = QtGui.QMainWindow()
    table = TableWidget()
    win.setCentralWidget(table)
    win.resize(800, 600)
    win.show()

    listOfLists = [[1, 2, 3, 4, 5]] * 20
    listOfDicts = [{'x': 1, 'y': 2, 'z': 3}] * 20
    dictOfLists = {'x': list(range(20)), 'y': list(range(20)), 'z': list(range(20))}
    plainArray = np.ones((20, 5))
    recordArray = np.ones((20,), dtype=[('x', int), ('y', int), ('z', int)])
    table.setData(listOfLists)

    meta = metaarray.MetaArray(np.ones((20, 3)), info=[
        {'values': np.linspace(1, 5, 20)},
        {'cols': [
            {'name': 'x'},
            {'name': 'y'},
            {'name': 'z'},
        ]}
    ])
    table.setData(meta)
| |
from werkzeug.utils import secure_filename
import os, string
from globe import app, db
from globe import models
from globe.models import Post
from flask import session
from globe.util import id_gen, clock
import boto3
def new(formParams):
    """Entry point for creating a post from an uploaded form.

    formParams must contain at least 'file' (the werkzeug upload) plus the
    fields consumed downstream by check()/upload_to_s3()/create().
    Returns True on success, False otherwise.

    NOTE(review): check() also performs uploads itself and never returns
    True (see check), so the upload_to_s3 branch here looks unreachable --
    confirm the intended control flow.
    """
    file = formParams['file']
    filename = file.filename
    # (Removed a leftover debug print of formParams['pano'].)
    if check(file, filename, formParams):
        if upload_to_s3(file, filename, formParams):
            return True
    # Explicit False instead of an implicit None (both are falsy to callers).
    return False
def check(file, filename, formParams):
    """Validate the upload and dispatch it down the appropriate upload path.

    Panoramas ('pano' == "True") are handed to crop_if_too_wide(), which
    uploads them itself; 2d images are cached locally and uploaded directly.

    NOTE(review): this function never returns True -- both success branches
    fall through and return None, so callers testing the return value (see
    new()) never take their success path. Confirm whether that is intended.
    """
    #test the file for filename and width
    if file and allowed_file(file.filename):
        # Sanitize the client-supplied name before using it as a path piece.
        filename = secure_filename(file.filename)
        if formParams['pano'] == "True":
            crop_if_too_wide(file, filename, formParams)
            #image is uploaded from the crop_if_too_wide function here
        else:
            print "image is 2d, not checking width"
            cache = os.environ['LOCAL_STORAGE_CACHE']
            # need to add filename to save it
            filename = "altered_" + filename
            destAndName = cache + "/" + filename
            file.save(destAndName)
            upload_to_s3(destAndName, filename, formParams)
    else:
        return False
'''def save_locally(file, filename, formParams):
print 'saving'
postID = id_gen.postID(4, string.digits, session['g_user'])
dest = os.environ['UPLOAD_PATH'] + str(session['g_user']) + "/posts/" + str(postID) + "/"
os.mkdir(dest)
filePath = os.path.join(dest + filename)
file.save(filePath)
if formParams['pano'] == "True":
filePath = os.path.join(dest + filename)
crop(filePath, filename)
return create(filePath, formParams) '''
def upload_to_s3(filePath, filename, formParams):
    """Upload the local file at *filePath* to S3, then create the Post row.

    The object is stored under <user>/posts/<postID>/<filename> with a
    public-read ACL; the local file is removed after a successful upload.
    Returns the result of create() on success, False on any upload error.
    """
    postID = id_gen.postID(4, string.digits, session['g_user'])
    dest = str(session['g_user']) + "/posts/" + str(postID) + "/"
    # 'with' closes the handle on every path; the original also had an
    # unreachable image.close() after the try/except returns.
    with open(filePath, 'rb') as image:
        s3 = boto3.client(
            "s3",
            aws_access_key_id=os.environ['S3_PUB_KEY'],
            aws_secret_access_key=os.environ['S3_PRIVATE_KEY']
        )
        try:
            s3.upload_fileobj(
                image,
                os.environ['S3_BUCKET_NAME'],
                #TO DO: modify this param to use the correct path, as the above only takes the bucket name, need to add /static/user_... etc
                dest + filename,
                ExtraArgs={
                    "ACL": "public-read"
                }
            )
            url = "https://" + os.environ['S3_ENDPOINT'] + "/" + os.environ['S3_BUCKET_NAME'] + "/" + dest + filename
            #delete the modified file from the system
            os.remove(filePath)
            return create(url, formParams)
        except Exception as e:
            # This is a catch all exception, edit this part to fit your needs.
            print("Something Happened: ", e)
            return False
def create(url, formParams):
    """Insert a new Post row pointing at the uploaded image *url*.

    Always returns True; any database error propagates to the caller.
    """
    print "creating new post..."
    #adds post to database
    # NOTE(review): deriving the primary key from the current row count is
    # race-prone and will collide after deletions -- consider letting the
    # database autoincrement the id instead.
    postCount = Post.query.count()
    postCount = postCount + 1
    post = Post(
        id=postCount,
        author=formParams['user'],
        postedOn=str(clock.timeNow()),
        postContent=formParams['desc'],
        likes="0",
        image=url,
        city=formParams['city'],
        coordinates=formParams['coords'],
        # 'appreaciated' matches the (misspelled) model field name.
        appreaciated=True,
        isPanorama=formParams['pano']
    )
    db.session.add(post)
    db.session.commit()
    print "did-diddly-done!"
    return True
# Helpers
def crop_if_too_wide(file, filename, formParams):
cache = os.environ['LOCAL_STORAGE_CACHE']
# need to add filename to save it
destAndName = cache + filename
try:
file.save(destAndName)
print ("saved to: ", destAndName)
except Exception as error:
print ("couldn't save file: " , error)
return file
from wand.image import Image
from wand.display import display
img = Image(filename=destAndName)
print img
width = img.width
height = img.height
with img.clone() as i:
#if the width of an image is wider than 4096px, crop the width, but leave the height
if width > 4096:
print "Image too wide, cropping width"
i.crop(0, 0, 4096, height)
# file doesn't overwrite, so give it a new name
name = cache + "-" + filename
print name
i.save(filename=name)
#delete old file
os.remove(destAndName)
else:
print "image is less than 4096px wide, skipping"
return upload_to_s3(name, filename, formParams)
# Extensions we accept for uploaded images (compared case-insensitively).
allowedExtensions = set(['jpg', 'jpeg', 'png'])


def allowed_file(filename):
    """Return True when *filename* has an extension we accept."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in allowedExtensions
'''def upload(modifiedFile, formParams):
filename = modifiedFile.filename
postID = id_gen.postID(4, string.digits, session['g_user'])
dest = str(session['g_user']) + "/posts/" + str(postID) + "/"
# uploads the file to S3
print modifiedFile
print "dir: %s" % dest
s3 = boto3.client(
"s3",
aws_access_key_id=os.environ['S3_PUB_KEY'],
aws_secret_access_key=os.environ['S3_PRIVATE_KEY']
)
try:
s3.upload_fileobj(
modifiedFile,
os.environ['S3_BUCKET_NAME'],
#TO DO: modify this param to use the correct path, as the above only takes the bucket name, need to add /static/user_... etc
dest + filename,
ExtraArgs={
"ACL": "public-read",
"ContentType": modifiedFile.content_type
}
)
except Exception as e:
# This is a catch all exception, edit this part to fit your needs.
print("Something Happened: ", e)
return False
url = os.environ['S3_ENDPOINT'] + "/" + os.environ['S3_BUCKET_NAME'] + "{}{}".format(dest, filename)
return create(url, formParams)
dest = app.config['UPLOAD_FOLDER'] + str(session['g_user']) + "/posts/" + str(postID) + "/"
print dest
#if the image is a panorama, check the width
# if it needs to be cropped, save it then update the folder
if request.form['image-type'] == "True":
print "creating temp directory to crop"
tempDir = os.environ['TEMP_FILE_CACHE'] + file.filename
file.save(tempDir)
crop(tempDir)
dest = tempDir
#use the cropped image
with open(tempDir) as f:
file = f
if files.passes_checks(file, dest, isPanorama):
if files.upload_to_s3(file, dest):
return redirect(url_for('load_feed'))
else:
return "File failed to upload!"
else:
return "File failed checks!"
else:
return redirect(url_for('load_feed'))
'''
| |
"""
sqery.py: Helpers for working with databases
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
from __future__ import print_function
import os
import re
import logging
import sqlite3
import inspect
import calendar
import datetime
import functools
import contextlib
from sqlize import (From, Where, Group, Order, Limit, Select, Update, Delete,
Insert, Replace, sqlin, sqlarray, NATURAL, INNER, CROSS,
OUTER, LEFT_OUTER, LEFT, JOIN)
from pytz import utc
from .migrations import migrate
from .utils import basestring
SLASH = re.compile(r'\\')
SQLITE_DATE_TYPES = ('date', 'datetime', 'timestamp')
MAX_VARIABLE_NUMBER = 999
def from_utc_timestamp(timestamp):
    """Converts the passed-in unix UTC timestamp into a datetime object."""
    seconds = float(timestamp)
    naive = datetime.datetime.utcfromtimestamp(seconds)
    # Attach the UTC zone so the result is timezone-aware.
    return naive.replace(tzinfo=utc)
def to_utc_timestamp(dt):
    """Converts the passed-in datetime object into a unix UTC timestamp."""
    is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
    if is_naive:
        logging.warning(
            "Naive datetime object passed. It is assumed that it's in UTC.")
    # timegm() treats the struct_time as UTC wall-clock time.
    return calendar.timegm(dt.timetuple())
# Store datetime objects as integer unix timestamps when they are bound as
# query parameters...
sqlite3.register_adapter(datetime.datetime, to_utc_timestamp)
# ...and convert columns declared as date/datetime/timestamp back into
# timezone-aware datetime objects when rows are read (requires connections
# opened with detect_types=sqlite3.PARSE_DECLTYPES, as Connection does).
for date_type in SQLITE_DATE_TYPES:
    sqlite3.register_converter(date_type, from_utc_timestamp)
def convert_query(fn):
    """ Ensure any SQLExpression instances are serialized

    :param qry: raw SQL string or SQLExpression instance
    :returns: raw SQL string
    """
    @functools.wraps(fn)
    def wrapper(self, qry, *args, **kwargs):
        serialize = getattr(qry, 'serialize', None)
        if serialize is not None:
            qry = serialize()
        assert isinstance(qry, basestring), 'Expected qry to be string'
        if self.debug:
            logging.debug('SQL: %s', qry)
        return fn(self, qry, *args, **kwargs)
    return wrapper
class Row(sqlite3.Row):
    """ sqlite.Row subclass that allows attribute access to items """

    def __getattr__(self, key):
        # row.column_name mirrors row['column_name'].
        return self[key]

    def get(self, key, default=None):
        # Dict-style lookup with a fallback for missing columns.
        try:
            return self[str(key)]
        except IndexError:
            return default

    def __contains__(self, key):
        return key in self.keys()
class SQLMixin(object):
    """Convenience surface re-exporting the sqlize query-builder API.

    Classes that mix this in (Connection, Cursor, Database) expose the SQL
    expression helpers directly, so calling code does not need to import
    sqlize itself.
    """
    sqlin = staticmethod(sqlin)
    sqlarray = staticmethod(sqlarray)
    From = From
    Where = Where
    Group = Group
    Order = Order
    Limit = Limit
    Select = Select
    Update = Update
    Delete = Delete
    Insert = Insert
    Replace = Replace
    # SQLite's default ceiling on '?' placeholders per statement.
    MAX_VARIABLE_NUMBER = MAX_VARIABLE_NUMBER
    NATURAL = NATURAL
    INNER = INNER
    CROSS = CROSS
    OUTER = OUTER
    LEFT_OUTER = LEFT_OUTER
    LEFT = LEFT
    JOIN = JOIN
class Connection(SQLMixin):
    """ Wrapper for sqlite3.Connection object """

    def __init__(self, path=':memory:', funcs=[], aggregates=[]):
        # NOTE(review): mutable default arguments; safe only as long as
        # callers never mutate self.funcs / self.aggregates in place.
        self.path = path
        self.funcs = funcs            # user-defined SQL functions
        self.aggregates = aggregates  # user-defined SQL aggregate classes
        self.connect()

    def connect(self):
        # Open (or reopen) the underlying sqlite3 connection and re-register
        # the custom functions/aggregates on it.
        self._conn = sqlite3.connect(self.path,
                                     detect_types=sqlite3.PARSE_DECLTYPES)
        self._conn.row_factory = Row
        # Allow manual transaction handling, see http://bit.ly/1C7E7EQ
        self._conn.isolation_level = None
        for fn in self.funcs:
            self.add_func(fn)
        for aggr in self.aggregates:
            self.add_aggregate(aggr)
        # More on WAL: https://www.sqlite.org/isolation.html
        # Requires SQLite >= 3.7.0
        cur = self._conn.cursor()
        cur.execute('PRAGMA journal_mode=WAL;')
        logging.debug('Connected to database {}'.format(self.path))

    def add_func(self, fn):
        # Register a Python callable as a SQL function.
        self._conn.create_function(*self.inspect_fn(fn))

    def add_aggregate(self, aggr):
        # Register a Python class (with step()/finalize()) as a SQL aggregate.
        self._conn.create_aggregate(*self.inspect_aggr(aggr))

    def close(self):
        # Commit any outstanding changes before closing the connection.
        self._conn.commit()
        self._conn.close()

    def new(self):
        """
        Establish a new connection to the same database as this one and return
        a new instance of the ``Connection`` object.
        """
        return self.__class__(self.path)

    @staticmethod
    def inspect_fn(fn):
        # Derive the (name, nargs, callable) triple create_function() needs.
        # NOTE(review): inspect.getargspec() was removed in Python 3.11;
        # this code predates that and will break on modern interpreters.
        try:
            name = fn.__name__
        except AttributeError:
            # This is a callable object, but not a function
            name = fn.__class__.__name__.lower()
        try:
            nargs = len(inspect.getargspec(fn).args)
        except TypeError:
            # This is a callable object, but not a function
            nargs = len(inspect.getargspec(fn.__call__).args) - 1
        return (name, nargs, fn)

    @staticmethod
    def inspect_aggr(cls):
        # Aggregates are classes; arity comes from step() minus 'self'.
        name = cls.__name__.lower()
        nargs = len(inspect.getargspec(cls.step).args) - 1
        return (name, nargs, cls)

    def __getattr__(self, attr):
        # Delegate unknown attribute reads to the raw sqlite3 connection.
        conn = object.__getattribute__(self, '_conn')
        return getattr(conn, attr)

    def __setattr__(self, attr, value):
        # Attributes not yet present on this wrapper (including '_conn'
        # itself, which must be settable before __getattr__ can work) are
        # stored locally; everything else is forwarded to sqlite3.
        if not hasattr(self, attr) or attr == '_conn':
            object.__setattr__(self, attr, value)
        else:
            setattr(self._conn, attr, value)

    def __repr__(self):
        return "<Connection path='%s'>" % self.path
class Cursor(SQLMixin):
    """Thin wrapper around a sqlite3 cursor with a chainable API."""

    def __init__(self, connection, debug=False):
        self.conn = connection
        self.cursor = connection.cursor()
        self.debug = debug

    @property
    def results(self):
        """All remaining rows from the last statement."""
        return self.cursor.fetchall()

    @property
    def result(self):
        """Next row from the last statement (None when exhausted)."""
        return self.cursor.fetchone()

    def __iter__(self):
        return self.cursor

    @convert_query
    def query(self, qry, *params, **kwparams):
        """ Perform a query

        Any positional arguments are converted to a list of arguments for the
        query, and are used to populate any '?' placeholders. The keyword
        arguments are converted to a mapping which provides values to ':name'
        placeholders. These do not apply to SQLExpression instances.

        :param qry: raw SQL or SQLExpression instance
        :returns: cursor object
        """
        # Positional params win; otherwise the keyword mapping is bound.
        self.cursor.execute(qry, params or kwparams)
        return self

    @convert_query
    def execute(self, qry, *args, **kwargs):
        """Pass-through to sqlite3 execute(); returns self for chaining."""
        self.cursor.execute(qry, *args, **kwargs)
        return self

    @convert_query
    def executemany(self, qry, *args, **kwargs):
        """Pass-through to sqlite3 executemany(); returns self for chaining."""
        self.cursor.executemany(qry, *args, **kwargs)
        return self

    def executescript(self, sql):
        """Run a multi-statement SQL script; returns self for chaining."""
        self.cursor.executescript(sql)
        return self
class Database(SQLMixin):
    """High-level facade over a Connection: cursors, queries, transactions."""

    # Re-exported so callers can run schema migrations via the db object.
    migrate = staticmethod(migrate)

    def __init__(self, conn, debug=False):
        self.conn = conn
        self.debug = debug

    def cursor(self, debug=None, connection=None):
        """
        Return a new cursor
        """
        if connection is None:
            connection = self.conn
        debug = self.debug if debug is None else debug
        return Cursor(connection, debug)

    def query(self, qry, *params, **kwparams):
        """ Perform a query

        Any positional arguments are converted to a list of arguments for the
        query, and are used to populate any '?' placeholders. The keyword
        arguments are converted to a mapping which provides values to ':name'
        placeholders. These do not apply to SQLExpression instances.

        :param qry: raw SQL or SQLExpression instance
        :returns: cursor object
        """
        cursor = self.cursor()
        cursor.query(qry, *params, **kwparams)
        return cursor

    def execute(self, qry, *args, **kwargs):
        # Run a single statement on a fresh cursor and return that cursor.
        cursor = self.cursor()
        cursor.execute(qry, *args, **kwargs)
        return cursor

    def executemany(self, qry, *args, **kwargs):
        cursor = self.cursor()
        cursor.executemany(qry, *args, **kwargs)
        return cursor

    def executescript(self, sql):
        cursor = self.cursor()
        cursor.executescript(sql)
        return cursor

    def commit(self):
        self.conn.commit()
        return self

    def rollback(self):
        # NOTE(review): commit() immediately after rollback() looks odd;
        # with isolation_level=None this presumably just terminates the
        # transaction -- confirm before changing.
        self.conn.rollback()
        self.conn.commit()
        return self

    def refresh_table_stats(self):
        # Update SQLite's query-planner statistics.
        return self.execute('ANALYZE sqlite_master;')

    def acquire_lock(self):
        # Open an exclusive transaction (write lock) on the database.
        return self.execute('BEGIN EXCLUSIVE;')

    def close(self):
        self.conn.close()
        return self

    def reconnect(self):
        self.conn.connect()
        return self

    @property
    def connection(self):
        return self.conn

    @contextlib.contextmanager
    def transaction(self, silent=False, new_connection=False, exclusive=False):
        # Wrap the with-block in BEGIN ... COMMIT, rolling back on error.
        # silent=True swallows the exception after the rollback;
        # new_connection=True runs the transaction on a separate connection
        # which is closed afterwards.
        if new_connection:
            cursor = self.cursor(connection=self.conn.new())
        else:
            cursor = self.cursor()
        if exclusive:
            cursor.execute('BEGIN EXCLUSIVE;')
        else:
            cursor.execute('BEGIN;')
        try:
            yield cursor
            cursor.conn.commit()
        except Exception:
            cursor.conn.rollback()
            if silent:
                return
            raise
        finally:
            if new_connection:
                cursor.conn.close()

    @classmethod
    def connect(cls, database, **kwargs):
        # NOTE(review): **kwargs is accepted but silently ignored -- confirm
        # whether extra connection options were ever meant to be honored.
        return Connection(database)

    def recreate(self, path):
        # NOTE(review): the Connection returned by connect() is discarded and
        # self.conn still points at the dropped database file -- verify this
        # is the intended behavior for callers of recreate().
        self.drop(path)
        self.connect(path)
        return self

    @classmethod
    def drop(cls, path):
        # Remove the database file from disk.
        os.remove(path)

    def __repr__(self):
        return "<Database connection='%s'>" % self.conn.path
class DatabaseContainer(dict):
    """Mapping of names to Database objects, also reachable as attributes."""

    def __init__(self, connections, debug=False):
        wrapped = {}
        for name, conn in connections.items():
            wrapped[name] = Database(conn, debug=debug)
        super(DatabaseContainer, self).__init__(wrapped)
        # Point __dict__ at the dict itself so container.name works too.
        self.__dict__ = self
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
import logging
import hashlib
import httplib
import tempfile
import urlparse
from glance.common import cfg
from glance.common import exception
import glance.store
import glance.store.base
import glance.store.location
logger = logging.getLogger('glance.store.s3')
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing an S3 URI. An S3 URI can look like any of
    the following:

        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id

    The s3+https:// URIs indicate there is an HTTPS s3service URL
    """
    def process_specs(self):
        # Build location attributes from the spec mapping; the scheme is
        # normalized from the service URL's protocol prefix.
        self.scheme = self.specs.get('scheme', 's3')
        self.accesskey = self.specs.get('accesskey')
        self.secretkey = self.specs.get('secretkey')
        s3_host = self.specs.get('s3serviceurl')
        self.bucket = self.specs.get('bucket')
        self.key = self.specs.get('key')

        if s3_host.startswith('https://'):
            self.scheme = 's3+https'
            s3_host = s3_host[8:].strip('/')
        elif s3_host.startswith('http://'):
            s3_host = s3_host[7:].strip('/')
        self.s3serviceurl = s3_host.strip('/')

    def _get_credstring(self):
        # 'access:secret@' when credentials are present, '' otherwise.
        if self.accesskey:
            return '%s:%s@' % (self.accesskey, self.secretkey)
        return ''

    def get_uri(self):
        # Inverse of parse_uri(): reassemble the canonical URI form.
        return "%s://%s%s/%s/%s" % (
            self.scheme,
            self._get_credstring(),
            self.s3serviceurl,
            self.bucket,
            self.key)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.

        Note that an Amazon AWS secret key can contain the forward slash,
        which breaks urlparse miserably. This function works around that
        issue.
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _(
                "URI cannot contain more than one occurrence of a scheme."
                "If you have specified a URI like "
                "s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/"
                "key-id"
                ", you need to change it to use the s3+https:// scheme, "
                "like so: "
                "s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/"
                "key-id"
            )
            raise exception.BadStoreUri(uri, reason)

        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('s3', 's3+http', 's3+https')
        self.scheme = pieces.scheme
        path = pieces.path.strip('/')
        netloc = pieces.netloc.strip('/')
        entire_path = (netloc + '/' + path).strip('/')

        if '@' in uri:
            # NOTE(review): assumes exactly one '@' in the whole URI; a
            # bucket or key containing '@' would make this split raise
            # ValueError -- confirm keys can never contain '@'.
            creds, path = entire_path.split('@')
            cred_parts = creds.split(':')
            try:
                access_key = cred_parts[0]
                secret_key = cred_parts[1]
                # NOTE(jaypipes): Need to encode to UTF-8 here because of a
                # bug in the HMAC library that boto uses.
                # See: http://bugs.python.org/issue5285
                # See: http://trac.edgewall.org/ticket/8083
                access_key = access_key.encode('utf-8')
                secret_key = secret_key.encode('utf-8')
                self.accesskey = access_key
                self.secretkey = secret_key
            except IndexError:
                reason = _("Badly formed S3 credentials %s") % creds
                raise exception.BadStoreUri(uri, reason)
        else:
            self.accesskey = None
            path = entire_path
        try:
            # Last path component is the key, second-to-last the bucket,
            # and anything left over is the S3 service host/path.
            path_parts = path.split('/')
            self.key = path_parts.pop()
            self.bucket = path_parts.pop()
            if len(path_parts) > 0:
                self.s3serviceurl = '/'.join(path_parts).strip('/')
            else:
                reason = _("Badly formed S3 URI. Missing s3 service URL.")
                raise exception.BadStoreUri(uri, reason)
        except IndexError:
            reason = _("Badly formed S3 URI")
            raise exception.BadStoreUri(uri, reason)
class ChunkedFile(object):
    """
    We send this back to the Glance API server as
    something that can iterate over a ``boto.s3.key.Key``
    """

    CHUNKSIZE = 65536

    def __init__(self, fp):
        self.fp = fp

    def __iter__(self):
        """Return an iterator over the image file"""
        try:
            chunk = self.fp.read(ChunkedFile.CHUNKSIZE)
            while chunk:
                yield chunk
                chunk = self.fp.read(ChunkedFile.CHUNKSIZE)
        finally:
            # Always release the file, even if iteration is abandoned.
            self.close()

    def getvalue(self):
        """Return entire string value... used in testing"""
        pieces = []
        self.len = 0
        for chunk in self:
            pieces.append(chunk)
            self.len += len(chunk)
        return "" + "".join(pieces)

    def close(self):
        """Close the internal file pointer"""
        if self.fp:
            self.fp.close()
            self.fp = None
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    # Configuration options registered on the store's config object.
    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key'),
        cfg.StrOpt('s3_store_secret_key'),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
    ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')
        # Derive the URI scheme from the configured host's protocol.
        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host
        self.s3_store_object_buffer_dir = \
            self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        # Read a required option; a missing/empty value is a config error.
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        key = self._retrieve_key(location)
        key.BufferSize = self.CHUNKSIZE
        return (ChunkedFile(key), key.size)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._retrieve_key(location)
            return key.size
        except Exception:
            # Size is best-effort; any lookup failure reports 0.
            return 0

    def _retrieve_key(self, location):
        # Open a boto connection from the location's embedded credentials
        # and fetch the key object (raises NotFound via get_bucket/get_key).
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)
        key = get_key(bucket_obj, loc.key)
        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                'accesskey': loc.accesskey, 'bucket': loc.bucket,
                'obj_name': loc.key})
        logger.debug(msg)
        return key

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `glance.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :retval `glance.store.ImageAddResult` object
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <USER> = ``s3_store_user``
            <KEY> = ``s3_store_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <ID> = The id of the image being added
        """
        # NOTE(review): *image_size* is never used -- the returned size is
        # taken from the stored key and the checksum is computed while
        # streaming. Confirm callers do not expect it to be validated.
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))

        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") % loc.get_uri())

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                'access_key': self.access_key, 'bucket': self.bucket,
                'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die
        # with an error about Input object has no method 'seek'. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write temporary file in as memory-efficient manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = _("Writing request body file to temporary file "
                "for %s") % loc.get_uri()
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        chunk = image_file.read(self.CHUNKSIZE)
        while chunk:
            checksum.update(chunk)
            temp_file.write(chunk)
            chunk = image_file.read(self.CHUNKSIZE)
        temp_file.flush()

        msg = _("Uploading temporary file to S3 for %s") % loc.get_uri()
        logger.debug(msg)

        # OK, now upload the data into the key
        # NOTE(review): the handle from open() is passed to boto and never
        # explicitly closed; it is only released when garbage-collected.
        # replace=False relies on the duplicate check performed above.
        key.set_contents_from_file(open(temp_file.name, 'r+b'), replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                'accesskey': loc.accesskey, 'bucket': loc.bucket,
                'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
def get_bucket(conn, bucket_id):
    """
    Get a bucket from an s3 connection

    :param conn: The ``boto.s3.connection.S3Connection``
    :param bucket_id: ID of the bucket to fetch
    :raises ``glance.exception.NotFound`` if bucket is not found.
    """
    bucket = conn.get_bucket(bucket_id)
    if bucket:
        return bucket
    msg = _("Could not find bucket with ID %(bucket_id)s") % locals()
    logger.error(msg)
    raise exception.NotFound(msg)
def create_bucket_if_missing(bucket, s3_conn, conf):
"""
Creates a missing bucket in S3 if the
``s3_store_create_bucket_on_put`` option is set.
:param bucket: Name of bucket to create
:param s3_conn: Connection to S3
:param conf: Option mapping
"""
from boto.exception import S3ResponseError
try:
s3_conn.get_bucket(bucket)
except S3ResponseError, e:
if e.status == httplib.NOT_FOUND:
if conf.s3_store_create_bucket_on_put:
try:
s3_conn.create_bucket(bucket)
except S3ResponseError, e:
msg = ("Failed to add bucket to S3.\n"
"Got error from S3: %(e)s" % locals())
raise glance.store.BackendException(msg)
else:
msg = ("The bucket %(bucket)s does not exist in "
"S3. Please set the "
"s3_store_create_bucket_on_put option "
"to add bucket to S3 automatically."
% locals())
raise glance.store.BackendException(msg)
def get_key(bucket, obj):
    """
    Look up a key inside a bucket.

    :param bucket: The ``boto.s3.Bucket``
    :param obj: Object to get the key for
    :raises ``glance.exception.NotFound`` if key is not found.
    """
    key = bucket.get_key(obj)
    if key and key.exists():
        return key
    # log before raising so the missing object is recorded server-side
    msg = _("Could not find key %(obj)s in bucket %(bucket)s") % {
        'obj': obj, 'bucket': bucket}
    logger.error(msg)
    raise exception.NotFound(msg)
glance.store.register_store(__name__, ['s3', 's3+http', 's3+https'])
| |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
# ###############################################################################################################################################
# xxd -i ros_topic_listener.py | sed 's/unsigned/const/g' | sed 's/ros_topic_listener_py/ROS_TOPIC_LISTENER_SCRIPT/g' > RosTopicListenerPython.h
# ###############################################################################################################################################
# make sure we aren't using floor division
from __future__ import division, print_function
NAME='rostopic'
import cogni_topic_listener
import os
import sys
import math
import socket
import time
import traceback
import yaml
import xmlrpclib
from operator import itemgetter
from urlparse import urlparse
import genpy
import roslib.message
import rosgraph
import rospy
class ROSTopicException(Exception):
    """Base error type for all rostopic-related failures."""
class ROSTopicIOException(ROSTopicException):
    """Raised when rostopic fails because of a network I/O error."""
def _check_master():
    """
    Make sure that master is available
    :raises: :exc:`ROSTopicIOException` If unable to successfully communicate with master
    """
    try:
        # getPid() is a cheap round-trip that proves the master answers
        rosgraph.Master('/rostopic').getPid()
    except socket.error:
        raise ROSTopicIOException("Unable to communicate with master!")
def _master_get_topic_types(master):
try:
val = master.getTopicTypes()
except xmlrpclib.Fault:
#TODO: remove, this is for 1.1
sys.stderr.write("WARNING: rostopic is being used against an older version of ROS/roscore\n")
val = master.getPublishedTopics('/')
return val
def _sleep(duration):
    # Delegate to rospy's wall-clock sleep helper.
    rospy.rostime.wallsleep(duration)
def msgevalgen(pattern):
    """
    Generates a function that returns the relevant field (aka 'subtopic') of a Message object
    :param pattern: subtopic, e.g. /x. Must have a leading '/' if specified, ``str``
    :returns: function that converts a message into the desired value, ``fn(Message) -> value``
    """
    if not pattern or pattern == '/':
        return None
    # '/a/b' -> 'msg.a.b'; built once and shared by every invocation of the
    # returned evaluator
    expression = 'msg' + '.'.join(pattern.split('/'))

    def msgeval(msg):
        try:
            return eval(expression)
        except AttributeError:
            sys.stdout.write("no field named [%s]"%pattern+"\n")
            return None

    return msgeval
def _get_topic_type(topic):
    """
    subroutine for getting the topic type
    :returns: topic type, real topic name and fn to evaluate the message instance
      if the topic points to a field within a topic, e.g. /rosout/msg, ``(str, str, fn)``
    """
    try:
        topic_types = _master_get_topic_types(rosgraph.Master('/rostopic'))
    except socket.error:
        raise ROSTopicIOException("Unable to communicate with master!")
    # prefer an exact match; otherwise allow topic to reference a field of a
    # published topic (prefix match on 'name/')
    candidates = [(name, ttype) for name, ttype in topic_types if name == topic]
    if not candidates:
        candidates = [(name, ttype) for name, ttype in topic_types
                      if topic.startswith(name + '/')]
    if not candidates:
        return None, None, None
    # choose longest match
    name, ttype = max(candidates, key=itemgetter(0))
    if ttype == rosgraph.names.ANYTYPE:
        return None, None, None
    return ttype, name, msgevalgen(topic[len(name):])
# NOTE: this is used externally by rxplot
def get_topic_type(topic, blocking=False):
    """
    Get the topic type.
    :param topic: topic name, ``str``
    :param blocking: (default False) block until topic becomes available, ``bool``
    :returns: topic type, real topic name and fn to evaluate the message instance
      if the topic points to a field within a topic, e.g. /rosout/msg. fn is None otherwise. ``(str, str, fn)``
    :raises: :exc:`ROSTopicException` If master cannot be contacted
    """
    topic_type, real_topic, msg_eval = _get_topic_type(topic)
    if topic_type:
        return topic_type, real_topic, msg_eval
    if not blocking:
        return None, None, None
    # warn once, then poll until the topic appears or rospy shuts down
    sys.stderr.write("WARNING: topic [%s] does not appear to be published yet\n" % topic)
    while not rospy.is_shutdown():
        topic_type, real_topic, msg_eval = _get_topic_type(topic)
        if topic_type:
            return topic_type, real_topic, msg_eval
        _sleep(0.1)
    return None, None, None
def get_topic_class(topic, blocking=False):
    """
    Get the topic message class
    :returns: message class for topic, real topic
      name, and function for evaluating message objects into the subtopic
      (or ``None``). ``(Message, str, str)``
    :raises: :exc:`ROSTopicException` If topic type cannot be determined or loaded
    """
    topic_type, real_topic, msg_eval = get_topic_type(topic, blocking=blocking)
    if topic_type is None:
        return None, None, None
    msg_class = roslib.message.get_message_class(topic_type)
    if msg_class:
        return msg_class, real_topic, msg_eval
    raise ROSTopicException("Cannot load message class for [%s]. Are your messages built?" % topic_type)
class CallbackEcho(object):
    """
    Callback instance that can print callback data in a variety of
    formats. Used for all variants of rostopic echo
    """
    def __init__(self, topic, msg_eval=None, plot=False, filter_fn=None,
                 echo_clear=False, echo_all_topics=False,
                 offset_time=False, count=None,
                 field_filter_fn=None):
        """
        :param topic: topic to echo; may reference a field of a topic, ``str``
        :param msg_eval: function extracting the desired field from a message, or None, ``fn(Message)->value``
        :param plot: if ``True``, echo in plotting-friendly format, ``bool``
        :param filter_fn: function that evaluates to ``True`` if message is to be echo'd, ``fn(topic, msg)``
        :param echo_clear: if ``True``, clear the screen before each message, ``bool``
        :param echo_all_topics: (optional) if ``True``, echo all messages in bag, ``bool``
        :param offset_time: (optional) if ``True``, display time as offset from current time, ``bool``
        :param count: number of messages to echo, ``None`` for infinite, ``int``
        :param field_filter_fn: filter the fields that are strified for Messages, ``fn(Message)->iter(str)``
        """
        # normalize away a trailing '/' so topic comparison/caching works
        if topic and topic[-1] == '/':
            topic = topic[:-1]
        self.topic = topic
        self.msg_eval = msg_eval
        self.plot = plot
        self.filter_fn = filter_fn
        self.prefix = ''
        # NOTE(review): both branches evaluate to '' here; stock rostopic uses
        # a YAML document separator in the non-plot case (bug #3291).
        # Presumably dropped on purpose since output is routed to
        # cogni_topic_listener rather than stdout -- confirm.
        self.suffix = '' if not plot else ''# same as YAML document separator, bug #3291
        self.echo_all_topics = echo_all_topics
        self.offset_time = offset_time
        # done tracks when we've exceeded the count
        self.done = False
        self.max_count = count
        self.count = 0
        # determine which strifying function to use
        if plot:
            #TODOXXX: need to pass in filter function
            # _str_plot is defined elsewhere in this module
            self.str_fn = _str_plot
            self.sep = ''
        else:
            #TODOXXX: need to pass in filter function
            self.str_fn = self.custom_strify_message
            if echo_clear:
                # ANSI escape: clear screen and move cursor to home position
                self.prefix = '\033[2J\033[;H'
        self.field_filter=field_filter_fn
        # first tracks whether or not we've printed anything yet. Need this for printing plot fields.
        self.first = True
        # cache
        self.last_topic = None
        self.last_msg_eval = None
    def custom_strify_message(self, val, indent='', time_offset=None, current_time=None, field_filter=None, type_information=None):
        """
        Stringify a message (or extracted field value) via genpy.
        :param type_information: ROS type string of val when it is a subfield, or None
        """
        # ensure to print uint8[] as array of numbers instead of string
        # (on Python 2 a uint8[] field deserializes to a str)
        if type_information and type_information.startswith('uint8['):
            val = [ord(x) for x in val]
        return genpy.message.strify_message(val, indent=indent, time_offset=time_offset, current_time=current_time, field_filter=field_filter)
    def callback(self, data, callback_args, current_time=None):
        """
        Callback to pass to rospy.Subscriber or to call
        manually. rospy.Subscriber constructor must also pass in the
        topic name as an additional arg
        :param data: Message
        :param callback_args: dict with 'topic' (topic name, ``str``) and optional 'type_information'
        :param current_time: override calculation of current time, :class:`genpy.Time`
        """
        topic = callback_args['topic']
        type_information = callback_args.get('type_information', None)
        # drop messages rejected by the user-supplied filter expression
        if self.filter_fn is not None and not self.filter_fn(data):
            return
        # already emitted the requested number of messages
        if self.max_count is not None and self.count >= self.max_count:
            self.done = True
            return
        try:
            msg_eval = self.msg_eval
            if topic == self.topic:
                pass
            elif self.topic.startswith(topic + '/'):
                # self.topic is actually a reference to topic field, generate msgeval
                if topic == self.last_topic:
                    # use cached eval
                    msg_eval = self.last_msg_eval
                else:
                    # generate msg_eval and cache
                    self.last_msg_eval = msg_eval = msgevalgen(self.topic[len(topic):])
                    self.last_topic = topic
            elif not self.echo_all_topics:
                return
            if msg_eval is not None:
                data = msg_eval(data)
            # data can be None if msg_eval returns None
            if data is not None:
                # NOTE: we do all prints using direct writes to sys.stdout, which works better with piping
                self.count += 1
                str_output = ""
                if self.offset_time:
                    str_output = self.prefix+self.str_fn(data, time_offset=rospy.get_rostime(),current_time=current_time, field_filter=self.field_filter, type_information=type_information)+self.suffix
                else:
                    str_output = self.prefix+self.str_fn(data,current_time=current_time, field_filter=self.field_filter, type_information=type_information)+self.suffix
                # print(str_output)
                # hand the rendered message to the embedding C++ listener
                # (this fork's key difference from stock rostopic)
                cogni_topic_listener.internal_topic_update(topic, str_output)
                # if self.offset_time:
                #     sys.stdout.write(self.prefix+\
                #         self.str_fn(data, time_offset=rospy.get_rostime(),
                #                     current_time=current_time, field_filter=self.field_filter, type_information=type_information) + \
                #         self.suffix + '\n')
                # else:
                #     sys.stdout.write(self.prefix+\
                #         self.str_fn(data,
                #                     current_time=current_time, field_filter=self.field_filter, type_information=type_information) + \
                #         self.suffix + '\n')
                # we have to flush in order before piping to work
                sys.stdout.flush()
            # #2778 : have to check count after incr to set done flag
            if self.max_count is not None and self.count >= self.max_count:
                self.done = True
        except IOError:
            self.done = True
        except:
            # set done flag so we exit
            self.done = True
            traceback.print_exc()
def _rostopic_echo(topic, callback_echo, bag_file=None, echo_all_topics=False):
    """
    Print new messages on topic to screen.
    :param topic: topic name, ``str``
    :param callback_echo: CallbackEcho instance that renders each message
    :param bag_file: name of bag file to echo messages from or ``None``, ``str``
    """
    # we have to init a node regardless and bag echoing can print timestamps
    if bag_file:
        # initialize rospy time due to potential timestamp printing
        rospy.rostime.set_rostime_initialized(True)
        # NOTE(review): _rostopic_echo_bag is defined elsewhere in this module
        _rostopic_echo_bag(callback_echo, bag_file)
    else:
        _check_master()
        rospy.init_node(NAME, anonymous=True)
        msg_class, real_topic, msg_eval = get_topic_class(topic, blocking=True)
        if msg_class is None:
            # occurs on ctrl-C
            return
        callback_echo.msg_eval = msg_eval
        # extract type information for submessages
        type_information = None
        if len(topic) > len(real_topic):
            subtopic = topic[len(real_topic):]
            subtopic = subtopic.strip('/')
            if subtopic:
                fields = subtopic.split('/')
                submsg_class = msg_class
                # walk the field path (e.g. /rosout/msg) down the nested
                # message classes, stripping any '[...]' array index
                while fields:
                    field = fields[0].split('[')[0]
                    del fields[0]
                    index = submsg_class.__slots__.index(field)
                    type_information = submsg_class._slot_types[index]
                    if fields:
                        submsg_class = roslib.message.get_message_class(type_information)
        use_sim_time = rospy.get_param('/use_sim_time', False)
        sub = rospy.Subscriber(real_topic, msg_class, callback_echo.callback, {'topic': topic, 'type_information': type_information})
        if use_sim_time:
            # #2950: print warning if nothing received for two seconds
            timeout_t = time.time() + 2.
            while time.time() < timeout_t and \
                  callback_echo.count == 0 and \
                  not rospy.is_shutdown() and \
                  not callback_echo.done:
                _sleep(0.1)
            if callback_echo.count == 0 and \
               not rospy.is_shutdown() and \
               not callback_echo.done:
                sys.stderr.write("WARNING: no messages received and simulated time is active.\nIs /clock being published?\n")
        # NOTE(review): unlike stock rostopic there is no spin loop here;
        # the module-level poll loop drives the subscriptions instead.
        # while not rospy.is_shutdown() and not callback_echo.done:
        #     _sleep(0.1)
##########################################################################################
# COMMAND PROCESSING #####################################################################
def _rostopic_cmd_echo(argv):
    """
    Command-line parsing and dispatch for 'rostopic echo'.
    :param argv: full command-line argument list, ``[str]``
    """
    def expr_eval(expr):
        # Wrap the user-supplied --filter expression in a callable; inside
        # eval() the message is visible under the name 'm'.
        # NOTE(review): eval() of a user-supplied expression is by design for
        # this local CLI tool -- never expose it to untrusted input.
        def eval_fn(m):
            return eval(expr)
        return eval_fn
    args = argv[2:]
    from optparse import OptionParser
    parser = OptionParser(usage="usage: %prog echo [options] /topic", prog=NAME)
    parser.add_option("-b", "--bag",
                      dest="bag", default=None,
                      help="echo messages from .bag file", metavar="BAGFILE")
    parser.add_option("-p",
                      dest="plot", default=False,
                      action="store_true",
                      help="echo in a plotting friendly format")
    parser.add_option("--filter",
                      dest="filter_expr", default=None,
                      metavar="FILTER-EXPRESSION",
                      help="Python expression to filter messages that are printed. Expression can use Python builtins as well as m (the message) and topic (the topic name).")
    parser.add_option("--nostr",
                      dest="nostr", default=False,
                      action="store_true",
                      help="exclude string fields")
    parser.add_option("--noarr",
                      dest="noarr", default=False,
                      action="store_true",
                      help="exclude arrays")
    parser.add_option("-c", "--clear",
                      dest="clear", default=False,
                      action="store_true",
                      help="clear screen before printing next message")
    parser.add_option("-a", "--all",
                      dest="all_topics", default=False,
                      action="store_true",
                      help="display all message in bag, only valid with -b option")
    parser.add_option("-n",
                      dest="msg_count", default=None, metavar="COUNT",
                      help="number of messages to echo")
    parser.add_option("--offset",
                      dest="offset_time", default=False,
                      action="store_true",
                      help="display time as offsets from current time (in seconds)")
    (options, args) = parser.parse_args(args)
    # validate mutually-dependent options before doing any work
    if len(args) > 1:
        parser.error("you may only specify one input topic")
    if options.all_topics and not options.bag:
        parser.error("Display all option is only valid when echoing from bag files")
    if options.offset_time and options.bag:
        parser.error("offset time option is not valid with bag files")
    if options.all_topics:
        topic = ''
    else:
        if len(args) == 0:
            parser.error("topic must be specified")
        topic = rosgraph.names.script_resolve_name('rostopic', args[0])
        # suppressing output to keep it clean
        #if not options.plot:
        #    print "rostopic: topic is [%s]"%topic
    filter_fn = None
    if options.filter_expr:
        filter_fn = expr_eval(options.filter_expr)
    try:
        msg_count = int(options.msg_count) if options.msg_count else None
    except ValueError:
        parser.error("COUNT must be an integer")
    field_filter_fn = create_field_filter(options.nostr, options.noarr)
    callback_echo = CallbackEcho(topic, None, plot=options.plot,
                                 filter_fn=filter_fn,
                                 echo_clear=options.clear, echo_all_topics=options.all_topics,
                                 offset_time=options.offset_time, count=msg_count,
                                 field_filter_fn=field_filter_fn)
    try:
        _rostopic_echo(topic, callback_echo, bag_file=options.bag)
    except socket.error:
        sys.stderr.write("Network communication failed. Most likely failed to communicate with master.\n")
def create_field_filter(echo_nostr, echo_noarr):
    """
    Create a field filter for message stringification.
    :param echo_nostr: if ``True``, omit string fields, ``bool``
    :param echo_noarr: if ``True``, omit array fields, ``bool``
    :returns: ``fn(Message) -> iter(str)`` yielding the field names to print
    """
    def field_filter(val):
        # _slot_types parallels __slots__: one ROS type string per field.
        # (Removed two dead locals that duplicated this zip's inputs.)
        for f, t in zip(val.__slots__, val._slot_types):
            if echo_noarr and '[' in t:
                continue
            elif echo_nostr and 'string' in t:
                continue
            yield f
    return field_filter
def cogni_topic_echo(topic, callback_echo):
    """
    Subscribe to *topic* and route each rendered message through
    *callback_echo* (which forwards it to cogni_topic_listener).
    :param topic: topic name, ``str``
    :param callback_echo: CallbackEcho instance rendering the messages
    """
    # we have to init a node regardless and bag echoing can print timestamps
    _check_master()
    rospy.init_node(NAME, anonymous=True)
    msg_class, real_topic, msg_eval = get_topic_class(topic, blocking=True)
    if msg_class is None:
        # occurs on ctrl-C
        return
    callback_echo.msg_eval = msg_eval
    # extract type information for submessages
    type_information = None
    if len(topic) > len(real_topic):
        subtopic = topic[len(real_topic):]
        subtopic = subtopic.strip('/')
        if subtopic:
            fields = subtopic.split('/')
            submsg_class = msg_class
            # walk the field path down the nested message classes, stripping
            # any '[...]' array index from each field name
            while fields:
                field = fields[0].split('[')[0]
                del fields[0]
                index = submsg_class.__slots__.index(field)
                type_information = submsg_class._slot_types[index]
                if fields:
                    submsg_class = roslib.message.get_message_class(type_information)
    use_sim_time = rospy.get_param('/use_sim_time', False)
    sub = rospy.Subscriber(real_topic, msg_class, callback_echo.callback, {'topic': topic, 'type_information': type_information})
    # remember the Subscriber so update_topics() can unregister it later
    _subscribers[topic] = sub
    if use_sim_time:
        # #2950: print warning if nothing received for two seconds
        timeout_t = time.time() + 2.
        while time.time() < timeout_t and \
              callback_echo.count == 0 and \
              not rospy.is_shutdown() and \
              not callback_echo.done:
            _sleep(0.1)
        if callback_echo.count == 0 and \
           not rospy.is_shutdown() and \
           not callback_echo.done:
            sys.stderr.write("WARNING: no messages received and simulated time is active.\nIs /clock being published?\n")
    # NOTE(review): no spin here -- the module-level poll loop drives execution
    # while not rospy.is_shutdown() and not callback_echo.done:
    #     _sleep(0.1)
# topic name -> CallbackEcho instance currently rendering that topic
_active_listeners = {}
# topic name -> rospy.Subscriber feeding the matching CallbackEcho
_subscribers = {}
def update_topics():
    """
    Reconcile active subscriptions with the topic list requested by the
    embedding C++ side (via cogni_topic_listener).
    """
    topics = cogni_topic_listener.internal_get_topics()
    # subscribe to any newly-requested topics
    for new_topic in topics:
        if new_topic not in _active_listeners:
            add_topic_listener(new_topic)
    # unsubscribe topics that are no longer requested; iterate over a
    # snapshot of the keys because the dict is mutated inside the loop
    # (RuntimeError on Python 3 otherwise; identical behavior on Python 2)
    for active_topic in list(_active_listeners.keys()):
        if active_topic not in topics:
            _subscribers[active_topic].unregister()
            del _active_listeners[active_topic]
            del _subscribers[active_topic]
def add_topic_listener(topic):
    """Create a CallbackEcho for *topic*, subscribe to it, and track it."""
    echo = CallbackEcho(topic, None)
    cogni_topic_echo(topic, echo)
    _active_listeners[str(topic)] = echo
def is_interruption_request():
    # True once the embedding C++ host has asked this script to stop.
    return cogni_topic_listener.internal_interruption_requested()
# add_topic_listener("/scan/ranges[0:1]")
# add_topic_listener("/scan/ranges[1:2]")
# add_topic_listener("/scan/ranges[2:3]")
# Poll loop: refresh the topic subscriptions once per second and exit when
# the embedding C++ host requests interruption.
while True:
    _sleep(1)
    update_topics()
    if (is_interruption_request()):
        break
| |
"""
mfstr module. Contains the ModflowStr class. Note that the user can access
the ModflowStr class as `flopy.modflow.ModflowStr`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/str.htm>`_.
"""
import sys
import numpy as np
from ..utils import MfList
from ..pakbase import Package
class ModflowStr(Package):
"""
MODFLOW Stream Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
mxacts : int
Maximum number of stream reaches that will be in use during any stress
period. (default is 0)
nss : int
Number of stream segments. (default is 0)
ntrib : int
The number of stream tributaries that can connect to one segment. The
program is currently dimensioned so that NTRIB cannot exceed 10.
(default is 0)
ndiv : int
A flag, which when positive, specifies that diversions from segments
are to be simulated. (default is 0)
icalc : int
A flag, which when positive, specifies that stream stages in reaches
are to be calculated. (default is 0)
const : float
Constant value used in calculating stream stage in reaches whenever
ICALC is greater than 0. This constant is 1.486 for flow units of
cubic feet per second and 1.0 for units of cubic meters per second.
The constant must be multiplied by 86,400 when using time units of
days in the simulation. If ICALC is 0, const can be any real value.
(default is 86400.)
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is 0).
istcb2 : int
A flag that is used flag and a unit number for the option to store
streamflow out of each reach in an unformatted (binary) file.
If istcb2 is greater than zero streamflow data will be saved.
(default is None).
dtype : tuple, list, or numpy array of numpy dtypes
is a tuple, list, or numpy array containing the dtype for
datasets 6 and 8 and the dtype for datasets 9 and 10 data in
stress_period_data and segment_data dictionaries.
(default is None)
stress_period_data : dictionary of reach data
Each dictionary contains a list of str reach data for a stress period.
Each stress period in the dictionary data contains data for
datasets 6 and 8.
The value for stress period data for a stress period can be an integer
(-1 or 0), a list of lists, a numpy array, or a numpy recarray. If
stress period data for a stress period contains an integer, a -1 denotes
data from the previous stress period will be reused and a 0 indicates
there are no str reaches for this stress period.
Otherwise stress period data for a stress period should contain mxacts
or fewer rows of data containing data for each reach. Reach data are
specified through definition of layer (int), row (int), column (int),
segment number (int), sequential reach number (int), flow entering a
segment (float), stream stage (float), streambed hydraulic conductance
(float), streambed bottom elevation (float), streambed top elevation
(float), stream width (float), stream slope (float), roughness
coefficient (float), and auxiliary variable data for auxiliary variables
defined in options (float).
If icalc=0 is specified, stream width, stream slope, and roughness
coefficients, are not used and can be any value for each stress period.
If data are specified for dataset 6 for a given stress period and icalc>0,
then stream width, stream slope, and roughness coefficients should be
appropriately set.
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. For example, if mxacts=3 this gives the form of::
stress_period_data =
{0: [
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]]
],
1: [
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]]
], ...
kper:
[
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough],
[lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]]
]
}
segment_data : dictionary of str segment data
Each dictionary contains a list of segment str data for a stress period.
Each stress period in the dictionary data contains data for
datasets 9, and 10. Segment data for a stress period are ignored if
a integer value is specified for stress period data.
The value for segment data for a stress period can be an integer
(-1 or 0), a list of lists, a numpy array, or a numpy recarray. If
segment data for a stress period contains an integer, a -1 denotes
data from the previous stress period will be reused and a 0 indicates
there are no str segments for this stress period.
Otherwise stress period data for a stress period should contain nss
rows of data containing data for each segment. Segment data are
specified through definition of itrib (int) data for up to 10 tributaries
and iupseg (int) data.
If ntrib=0 is specified, itrib values are not used and can be any value
for each stress period. If data are specified for dataset 6 for a given
stress period and ntrib>0, then itrib data should be specified for columns
0:ntrib.
If ndiv=0 is specified, iupseg values are not used and can be any value
for each stress period. If data are specified for dataset 6 for a given
stress period and ndiv>0, then iupseg data should be specified for the
column in the dataset [10].
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. For example, if nss=2 and ntrib>0 and/or ndiv>0 this gives the
form of::
segment_data =
{0: [
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
],
1: [
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
], ...
kper:
[
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
[itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg],
]
}
options : list of strings
Package options. (default is None).
extension : string
Filename extension (default is 'str')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output and sfr output name will be
created using the model name and .cbc the .sfr.bin/.sfr.out extensions
(for example, modflowtest.cbc, and modflowtest.sfr.bin), if ipakcbc and
istcb2 are numbers greater than zero. If a single string is passed
the package will be set to the string and cbc and sfr output names
will be created using the model name and .cbc and .sfr.bin/.sfr.out
extensions, if ipakcbc and istcb2 are numbers greater than zero. To
define the names for all package files (input and output) the length
of the list of strings should be 3. Default is None.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> strd = {}
>>> strd[0] = [[2, 3, 4, 15.6, 1050., -4]] #this river boundary will be
>>> #applied to all stress periods
>>> str8 = flopy.modflow.ModflowStr(m, stress_period_data=strd)
"""
def __init__(self, model, mxacts=0, nss=0, ntrib=0, ndiv=0, icalc=0,
const=86400., ipakcb=None, istcb2=None,
dtype=None, stress_period_data=None, segment_data=None,
extension='str', unitnumber=None, filenames=None,
options=None, **kwargs):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowStr.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None, None]
elif isinstance(filenames, str):
filenames = [filenames, None, None]
elif isinstance(filenames, list):
if len(filenames) < 3:
for idx in range(len(filenames), 3):
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowStr.ftype())
else:
ipakcb = 0
if istcb2 is not None:
fname = filenames[2]
model.add_output_file(istcb2, fname=fname,
package=ModflowStr.ftype())
else:
ipakcb = 0
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [ModflowStr.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'str.htm'
self.mxacts = mxacts
self.nss = nss
self.icalc = icalc
self.ntrib = ntrib
self.ndiv = ndiv
self.const = const
self.ipakcb = ipakcb
self.istcb2 = istcb2
# issue exception if ntrib is greater than 10
if ntrib > 10:
raise Exception('ModflowStr error: ntrib must be less that 10: ' +
'specified value = {}'.format(ntrib))
if options is None:
options = []
self.options = options
# parameters are not supported
self.npstr = 0
# determine dtype for dataset 6
if dtype is not None:
self.dtype = dtype[0]
self.dtype2 = dtype[1]
else:
auxnames = []
if len(options) > 0:
auxnames = []
it = 0
while True:
if 'aux' in options[it].lower():
aux_names.append(options[it + 1].lower())
it += 1
it += 1
if it > len(options):
break
if len(auxnames) < 1:
auxnames = None
d, d2 = self.get_empty(1, 1, aux_names=auxnames,
structured=self.parent.structured)
self.dtype = d.dtype
self.dtype2 = d2.dtype
# convert stress_period_data for datasets 6 and 8 to a recarray if necessary
if stress_period_data is not None:
for key, d in stress_period_data.items():
if isinstance(d, list):
d = np.array(d)
if isinstance(d, np.recarray):
assert d.dtype == self.dtype, 'ModflowStr error: recarray dtype: ' + \
str(
d.dtype) + ' does not match ' + \
'self dtype: ' + str(
self.dtype)
elif isinstance(d, np.ndarray):
d = np.core.records.fromarrays(d.transpose(),
dtype=self.dtype)
elif isinstance(d, int):
if model.verbose:
if d < 0:
print(
' reusing str data from previous stress period')
elif d == 0:
print(' no str data for stress period {}'.format(
key))
else:
raise Exception(
'ModflowStr error: unsupported data type: ' +
str(type(d)) + ' at kper ' +
'{0:d}'.format(key))
# add stress_period_data to package
self.stress_period_data = MfList(self, stress_period_data)
# convert segment_data for datasets 9 and 10 to a recarray if necessary
if segment_data is not None:
for key, d in segment_data.items():
if isinstance(d, list):
d = np.array(d)
if isinstance(d, np.recarray):
assert d.dtype == self.dtype2, 'ModflowStr error: recarray dtype: ' + \
str(
d.dtype) + ' does not match ' + \
'self dtype: ' + str(
self.dtype2)
elif isinstance(d, np.ndarray):
d = np.core.records.fromarrays(d.transpose(),
dtype=self.dtype2)
elif isinstance(d, int):
if model.verbose:
if d < 0:
print(
' reusing str segment data from previous stress period')
elif d == 0:
print(
' no str segment data for stress period {}'.format(
key))
else:
raise Exception(
'ModflowStr error: unsupported data type: ' +
str(type(d)) + ' at kper ' +
'{0:d}'.format(key))
# add stress_period_data to package
self.segment_data = segment_data
self.parent.add_package(self)
return
    @staticmethod
    def get_empty(ncells=0, nss=0, aux_names=None, structured=True):
        """
        Return empty recarrays for reach data (datasets 6/8) and segment
        data (datasets 9/10), sized for ncells reaches and nss segments.
        """
        # get an empty recarray that corresponds to dtype
        dtype, dtype2 = ModflowStr.get_default_dtype(structured=structured)
        if aux_names is not None:
            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
        # reach data is flagged with a large negative no-data value,
        # segment data is zero-filled
        d = np.zeros((ncells, len(dtype)), dtype=dtype)
        d[:, :] = -1.0E+10
        d2 = np.zeros((nss, len(dtype2)), dtype=dtype2)
        d2[:, :] = 0
        return (np.core.records.fromarrays(d.transpose(), dtype=dtype),
                np.core.records.fromarrays(d2.transpose(), dtype=dtype2))
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype([("k", np.int), ("i", np.int), ("j", np.int),
("segment", np.int), ("reach", np.int),
("flow", np.float32), ("stage", np.float32),
("cond", np.float32), ("sbot", np.float32),
("stop", np.float32),
("width", np.float32), ("slope", np.float32),
("rough", np.float32)])
else:
dtype = np.dtype([("node", np.int),
("segment", np.int), ("reach", np.int),
("flow", np.float32), ("stage", np.float32),
("cond", np.float32), ("sbot", np.float32),
("stop", np.float32),
("width", np.float32), ("slope", np.float32),
("rough", np.float32)])
dtype2 = np.dtype([("itrib01", np.int), ("itrib02", np.int),
("itrib03", np.int), ("itrib04", np.int),
("itrib05", np.int), ("itrib06", np.int),
("itrib07", np.int), ("itrib08", np.int),
("itrib09", np.int), ("itrib10", np.int),
("iupseg", np.int)])
return dtype, dtype2
def ncells(self):
# Return the maximum number of cells that have a stream
# (developed for MT3DMS SSM package)
return self.mxacts
    def write_file(self):
        """
        Write the package file.
        Returns
        -------
        None
        """
        f_str = open(self.fn_path, 'w')
        # dataset 0
        f_str.write('{0}\n'.format(self.heading))
        # dataset 1 - parameters not supported on write
        # dataset 2
        line = '{:10d}{:10d}{:10d}{:10d}{:10d}{:10.3f}{:10d}{:10d}'.format(
            self.mxacts, self.nss,
            self.ntrib, self.ndiv,
            self.icalc, self.const,
            self.ipakcb, self.istcb2)
        for opt in self.options:
            line += ' ' + str(opt)
        line += '\n'
        f_str.write(line)
        # dataset 3 - parameters not supported on write
        # dataset 4a - parameters not supported on write
        # dataset 4b - parameters not supported on write
        nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper()
        kpers = list(self.stress_period_data.data.keys())
        kpers.sort()
        # choose free- or fixed-width field formats depending on the BAS6
        # free-format flag
        if self.parent.bas6.ifrefm:
            fmt6 = ['{:5d} ', '{:5d} ', '{:5d} ', '{:5d} ', '{:5d} ',
                    '{:15.7f} ', '{:15.7f} ', '{:15.7f} ', '{:15.7f} ',
                    '{:15.7f} ']
            # NOTE(review): '{:15.7}' (no trailing 'f') uses general float
            # formatting unlike the fixed-format '{:10.4g}' branch - confirm
            fmt8 = '{:15.7} '
            fmt9 = '{:10d} '
        else:
            fmt6 = ['{:5d}', '{:5d}', '{:5d}', '{:5d}', '{:5d}',
                    '{:15.4f}', '{:10.3f}', '{:10.3f}', '{:10.3f}', '{:10.3f}']
            fmt8 = '{:10.4g}'
            fmt9 = '{:5d}'
        for iper in range(nper):
            # itmp: number of reaches for this stress period; 0 means no data,
            # -1 means reuse the previous period's data
            if iper not in kpers:
                if iper == 0:
                    itmp = 0
                else:
                    itmp = -1
            else:
                tdata = self.stress_period_data[iper]
                sdata = self.segment_data[iper]
                if isinstance(tdata, int):
                    itmp = tdata
                elif tdata is None:
                    itmp = -1
                else:
                    itmp = tdata.shape[0]
            line = '{:10d}{:10d}{:10d} # stress period {}\n'.format(itmp, 0,
                                                                    0, iper)
            f_str.write(line)
            if itmp > 0:
                # dataset 6
                # NOTE(review): the in-place increments below mutate the
                # stored recarray, so calling write_file twice would shift
                # the k/i/j indices again - verify
                for line in tdata:
                    line['k'] += 1
                    line['i'] += 1
                    line['j'] += 1
                    for idx, v in enumerate(line):
                        if idx < 10:
                            f_str.write(fmt6[idx].format(v))
                        elif idx > 12:
                            # columns beyond 12 are auxiliary values
                            f_str.write('{} '.format(v))
                    f_str.write('\n')
                # dataset 8
                if self.icalc > 0:
                    for line in tdata:
                        for idx in range(10, 13):
                            f_str.write(fmt8.format(line[idx]))
                        f_str.write('\n')
                # dataset 9
                # NOTE(review): only the first 3 itrib columns are written
                # even though up to 10 exist in the dtype - confirm against
                # the MODFLOW STR input specification
                if self.ntrib > 0:
                    for line in sdata:
                        for idx in range(3):
                            f_str.write(fmt9.format(line[idx]))
                        f_str.write('\n')
                # dataset 10
                if self.ndiv > 0:
                    for line in sdata:
                        f_str.write('{:10d}\n'.format(line[3]))
        # close the str file
        f_str.close()
    @staticmethod
    def load(f, model, nper=None, ext_unit_dict=None):
        """
        Load an existing package.
        Parameters
        ----------
        f : filename or file handle
            File to load.
        model : model object
            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
            which this package will be added.
        nper : int
            The number of stress periods. If nper is None, then nper will be
            obtained from the model object. (default is None).
        ext_unit_dict : dictionary, optional
            If the arrays in the file are specified using EXTERNAL,
            or older style array control records, then `f` should be a file
            handle. In this case ext_unit_dict is required, which can be
            constructed using the function
            :class:`flopy.utils.mfreadnam.parsenamefile`.
        Returns
        -------
        str : ModflowStr object
            ModflowStr object.
        Examples
        --------
        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> strm = flopy.modflow.ModflowStr.load('test.str', m)
        """
        if model.verbose:
            sys.stdout.write('loading str package file...\n')
        # accept either an open file handle or a filename
        if not hasattr(f, 'read'):
            filename = f
            f = open(filename, 'r')
        # dataset 0 -- header (skip leading comment lines)
        while True:
            line = f.readline()
            if line[0] != '#':
                break
        # read dataset 1 - optional parameters
        npstr, mxl = 0, 0
        t = line.strip().split()
        if t[0].lower() == 'parameter':
            if model.verbose:
                sys.stdout.write('  loading str dataset 1\n')
            npstr = int(t[1])
            mxl = int(t[2])
            # read next line
            line = f.readline()
        # data set 2
        if model.verbose:
            sys.stdout.write('  loading str dataset 2\n')
        t = line.strip().split()
        mxacts = int(t[0])
        nss = int(t[1])
        ntrib = int(t[2])
        ndiv = int(t[3])
        icalc = int(t[4])
        const = float(t[5])
        istcb1 = int(t[6])
        istcb2 = int(t[7])
        ipakcb = 0
        try:
            if istcb1 != 0:
                ipakcb = istcb1
                model.add_pop_key_list(istcb1)
        except:
            pass
        try:
            if istcb2 != 0:
                # NOTE(review): ipakcb is overwritten with the constant 53
                # here rather than istcb2 - confirm this is intentional
                ipakcb = 53
                model.add_pop_key_list(istcb2)
        except:
            pass
        # remaining tokens on the dataset-2 line are options (e.g. AUX name)
        options = []
        aux_names = []
        if len(t) > 8:
            it = 8
            while it < len(t):
                toption = t[it]
                if 'aux' in toption.lower():
                    options.append(' '.join(t[it:it + 2]))
                    aux_names.append(t[it + 1].lower())
                    it += 1
                it += 1
        # read parameter data
        if npstr > 0:
            dt = ModflowStr.get_empty(1, aux_names=aux_names).dtype
            pak_parms = mfparbc.load(f, npstr, dt, model.verbose)
        if nper is None:
            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
        stress_period_data = {}
        segment_data = {}
        for iper in range(nper):
            if model.verbose:
                print("   loading " + str(
                    ModflowStr) + " for kper {0:5d}".format(iper + 1))
            line = f.readline()
            if line == '':
                break
            # dataset 5: itmp (reach count, 0 = none, <0 = reuse previous)
            t = line.strip().split()
            itmp = int(t[0])
            irdflg, iptflg = 0, 0
            if len(t) > 1:
                irdflg = int(t[1])
            if len(t) > 2:
                iptflg = int(t[2])
            if itmp == 0:
                bnd_output = None
                seg_output = None
                current, current_seg = ModflowStr.get_empty(itmp, nss,
                                                            aux_names=aux_names)
            elif itmp > 0:
                if npstr > 0:
                    partype = ['cond']
                    if model.verbose:
                        print("   reading str dataset 7")
                    for iparm in range(itmp):
                        line = f.readline()
                        t = line.strip().split()
                        pname = t[0].lower()
                        iname = 'static'
                        try:
                            tn = t[1]
                            c = tn.lower()
                            instance_dict = pak_parms.bc_parms[pname][1]
                            if c in instance_dict:
                                iname = c
                            else:
                                iname = 'static'
                        except:
                            pass
                        par_dict, current_dict = pak_parms.get(pname)
                        data_dict = current_dict[iname]
                        current = ModflowStr.get_empty(par_dict['nlst'],
                                                       aux_names=aux_names)
                        # get appropriate parval
                        # NOTE(review): np.float was removed in numpy>=1.24;
                        # also parval is computed but never applied below -
                        # verify both against upstream flopy
                        if model.mfpar.pval is None:
                            parval = np.float(par_dict['parval'])
                        else:
                            try:
                                parval = np.float(
                                    model.mfpar.pval.pval_dict[pname])
                            except:
                                parval = np.float(par_dict['parval'])
                        # fill current parameter data (par_current)
                        for ibnd, t in enumerate(data_dict):
                            current[ibnd] = tuple(t[:len(current.dtype.names)])
                else:
                    if model.verbose:
                        print("   reading str dataset 6")
                    current, current_seg = ModflowStr.get_empty(itmp, nss,
                                                                aux_names=aux_names)
                    for ibnd in range(itmp):
                        line = f.readline()
                        t = []
                        if model.free_format_input:
                            tt = line.strip().split()
                            # current[ibnd] = tuple(t[:len(current.dtype.names)])
                            for idx, v in enumerate(tt[:10]):
                                t.append(v)
                            # three placeholder values for dataset-8 columns
                            for ivar in range(3):
                                t.append(-1.0E+10)
                            # NOTE(review): aux values are taken from t[10:],
                            # which holds the placeholders just appended, not
                            # from the parsed tokens tt - verify
                            if len(aux_names) > 0:
                                for idx, v in enumerate(t[10:]):
                                    t.append(v)
                            if len(tt) != len(current.dtype.names) - 3:
                                raise Exception
                        else:
                            # fixed-format field widths for dataset 6
                            ipos = [5, 5, 5, 5, 5, 15, 10, 10, 10, 10]
                            istart = 0
                            for ivar in range(len(ipos)):
                                istop = istart + ipos[ivar]
                                txt = line[istart:istop]
                                try:
                                    t.append(float(txt))
                                except:
                                    t.append(0.)
                                istart = istop
                            for ivar in range(3):
                                t.append(-1.0E+10)
                            if len(aux_names) > 0:
                                tt = line[istart:].strip().split()
                                for ivar in range(len(aux_names)):
                                    t.append(tt[ivar])
                        current[ibnd] = tuple(t[:len(current.dtype.names)])
                    # convert indices to zero-based
                    current['k'] -= 1
                    current['i'] -= 1
                    current['j'] -= 1
                    # read dataset 8
                    # NOTE(review): datasets 8-10 and the bnd_output /
                    # seg_output assignments below are only reached on the
                    # non-parameter (npstr == 0) path; the npstr > 0 branch
                    # leaves them unset - verify
                    if icalc > 0:
                        if model.verbose:
                            print("   reading str dataset 8")
                        for ibnd in range(itmp):
                            line = f.readline()
                            if model.free_format_input:
                                t = line.strip().split()
                                v = [float(vt) for vt in t[:3]]
                            else:
                                v = []
                                ipos = [10, 10, 10]
                                istart = 0
                                for ivar in range(len(ipos)):
                                    istop = istart + ipos[ivar]
                                    v.append(float(line[istart:istop]))
                                    istart = istop + 1
                            ipos = 0
                            # columns 10-12 hold width/slope/rough values
                            for idx in range(10, 13):
                                current[ibnd][idx] = v[ipos]
                                ipos += 1
                    bnd_output = np.recarray.copy(current)
                    # read data set 9
                    if ntrib > 0:
                        if model.verbose:
                            print("   reading str dataset 9")
                        for iseg in range(nss):
                            line = f.readline()
                            if model.free_format_input:
                                t = line.strip().split()
                                v = [float(vt) for vt in t[:ntrib]]
                            else:
                                v = []
                                ipos = 5
                                istart = 0
                                for ivar in range(ntrib):
                                    istop = istart + ipos
                                    try:
                                        v.append(float(line[istart:istop]))
                                    except:
                                        v.append(0.)
                                    istart = istop
                            for idx in range(ntrib):
                                current_seg[iseg][idx] = v[idx]
                    # read data set 10
                    if ndiv > 0:
                        if model.verbose:
                            print("   reading str dataset 10")
                        for iseg in range(nss):
                            line = f.readline()
                            if model.free_format_input:
                                t = line.strip().split()
                                v = float(t[0])
                            else:
                                ipos = 10
                                istart = 0
                                for ivar in range(ndiv):
                                    istop = istart + ipos
                                    v = float(line[istart:istop])
                                    istart = istop
                            # column 10 is iupseg (upstream segment)
                            current_seg[iseg][10] = v
                    seg_output = np.recarray.copy(current_seg)
            else:
                # negative itmp: reuse data from the previous stress period
                bnd_output = -1
                seg_output = -1
            if bnd_output is None:
                stress_period_data[iper] = itmp
                segment_data[iper] = itmp
            else:
                stress_period_data[iper] = bnd_output
                segment_data[iper] = seg_output
        # determine specified unit number
        unitnumber = None
        filenames = [None, None, None]
        if ext_unit_dict is not None:
            unitnumber, filenames[0] = \
                model.get_ext_dict_attr(ext_unit_dict,
                                        filetype=ModflowStr.ftype())
            if ipakcb > 0:
                iu, filenames[1] = \
                    model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
            if abs(istcb2) > 0:
                iu, filenames[2] = \
                    model.get_ext_dict_attr(ext_unit_dict, unit=abs(istcb2))
        strpak = ModflowStr(model, mxacts=mxacts, nss=nss,
                            ntrib=ntrib, ndiv=ndiv, icalc=icalc,
                            const=const, ipakcb=ipakcb, istcb2=istcb2,
                            stress_period_data=stress_period_data,
                            segment_data=segment_data,
                            options=options, unitnumber=unitnumber,
                            filenames=filenames)
        return strpak
@staticmethod
def ftype():
return 'STR'
@staticmethod
def defaultunit():
return 118
| |
# -*- coding: utf-8 -*-
import mimetypes
import re
import json
from html.parser import HTMLParser
from html.entities import name2codepoint
from html import escape
from .exceptions import NotAllowedTag, InvalidHTML
RE_WHITESPACE = re.compile(r'(\s+)', re.UNICODE)
ALLOWED_TAGS = {
    'a', 'aside', 'b', 'blockquote', 'br', 'code', 'em', 'figcaption', 'figure',
    'h3', 'h4', 'hr', 'i', 'iframe', 'img', 'li', 'ol', 'p', 'pre', 's',
    'strong', 'u', 'ul', 'video'
}
VOID_ELEMENTS = {
    'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
    'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'
}
BLOCK_ELEMENTS = {
    'address', 'article', 'aside', 'blockquote', 'canvas', 'dd', 'div', 'dl',
    'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2',
    'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'li', 'main', 'nav',
    'noscript', 'ol', 'output', 'p', 'pre', 'section', 'table', 'tfoot', 'ul',
    'video'
}


class HtmlToNodesParser(HTMLParser):
    """Parse a restricted subset of HTML into a tree of node dicts.

    Each element becomes ``{'tag': ..., 'attrs': {...}, 'children': [...]}``
    (the 'attrs'/'children' keys are omitted when empty); text becomes plain
    strings. Raises NotAllowedTag for tags outside ALLOWED_TAGS and
    InvalidHTML for unbalanced markup.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.nodes = []                   # top-level result list
        self.current_nodes = self.nodes   # children list currently being filled
        self.parent_nodes = []            # stack of parent children-lists
        self.last_text_node = None        # last emitted text (whitespace folding)
        self.tags_path = []               # open (non-void) tags, e.g. 'pre' check

    def add_str_node(self, s):
        if not s:
            return
        if 'pre' not in self.tags_path:  # keep whitespace in <pre>
            s = RE_WHITESPACE.sub(' ', s)
            # collapse runs of whitespace across adjacent text nodes
            if self.last_text_node is None or self.last_text_node.endswith(' '):
                s = s.lstrip(' ')
            if not s:
                self.last_text_node = None
                return
            self.last_text_node = s
        if self.current_nodes and isinstance(self.current_nodes[-1], str):
            self.current_nodes[-1] += s
        else:
            self.current_nodes.append(s)

    def handle_starttag(self, tag, attrs_list):
        if tag not in ALLOWED_TAGS:
            raise NotAllowedTag(f'{tag!r} tag is not allowed')
        if tag in BLOCK_ELEMENTS:
            self.last_text_node = None
        node = {'tag': tag}
        self.current_nodes.append(node)
        if attrs_list:
            attrs = {}
            node['attrs'] = attrs
            for attr, value in attrs_list:
                attrs[attr] = value
        if tag not in VOID_ELEMENTS:
            # Only non-void tags are tracked: void elements never produce a
            # matching end tag (handle_endtag returns early for them), so the
            # old behavior of pushing every tag left void tags on tags_path
            # forever.
            self.tags_path.append(tag)
            self.parent_nodes.append(self.current_nodes)
            self.current_nodes = node['children'] = []

    def handle_endtag(self, tag):
        if tag in VOID_ELEMENTS:
            return
        if not len(self.parent_nodes):
            raise InvalidHTML(f'{tag!r} missing start tag')
        self.current_nodes = self.parent_nodes.pop()
        last_node = self.current_nodes[-1]
        if last_node['tag'] != tag:
            raise InvalidHTML(f'{tag!r} tag closed instead of {last_node["tag"]!r}')
        self.tags_path.pop()
        if not last_node['children']:
            last_node.pop('children')

    def handle_data(self, data):
        self.add_str_node(data)

    # NOTE: the two handlers below only fire when convert_charrefs=False;
    # with the HTMLParser default, references arrive via handle_data.
    def handle_entityref(self, name):
        self.add_str_node(chr(name2codepoint[name]))

    def handle_charref(self, name):
        if name.startswith('x'):
            c = chr(int(name[1:], 16))
        else:
            c = chr(int(name))
        self.add_str_node(c)

    def get_nodes(self):
        if self.parent_nodes:
            not_closed_tag = self.parent_nodes[-1][-1]['tag']
            raise InvalidHTML(f'{not_closed_tag!r} tag is not closed')
        return self.nodes
def html_to_nodes(html_content):
    """Convert an HTML string into a list of Telegraph node objects."""
    nodes_parser = HtmlToNodesParser()
    nodes_parser.feed(html_content)
    return nodes_parser.get_nodes()
def nodes_to_html(nodes):
    """Render a list of Telegraph nodes back into an HTML string.

    Strings are HTML-escaped; element nodes are emitted with their attrs,
    children are rendered recursively (via an explicit stack), and empty
    void elements are self-closed.
    """
    out = []
    # Explicit stacks of (iterator, open tag) keep rendering iterative so
    # arbitrarily deep trees do not hit the recursion limit.
    work = [iter(nodes)]
    open_tags = [None]
    while work:
        node = next(work[-1], None)
        if node is None:
            # current children list exhausted: close its parent tag
            work.pop()
            tag = open_tags.pop()
            if tag is not None:
                out.append(f'</{tag}>')
            continue
        if isinstance(node, str):
            out.append(escape(node))
            continue
        tag = node['tag']
        attrs = node.get('attrs')
        if attrs:
            attr_text = ''.join(f' {attr}="{escape(value)}"'
                                for attr, value in attrs.items())
        else:
            attr_text = ''
        children = node.get('children')
        if children:
            out.append(f'<{tag}{attr_text}>')
            work.append(iter(children))
            open_tags.append(tag)
        elif tag in VOID_ELEMENTS:
            out.append(f'<{tag}{attr_text}/>')
        else:
            out.append(f'<{tag}{attr_text}></{tag}>')
    return ''.join(out)
class FilesOpener(object):
    """Context manager turning paths / file objects into an HTTP 'files' list.

    Accepts a single item or a list of items, where each item is a path, a
    readable file object, or a ``(path_or_file, name)`` tuple. On exit every
    handle (including caller-supplied ones) is closed.
    """

    def __init__(self, paths, key_format='file{}'):
        self.paths = paths if isinstance(paths, list) else [paths]
        self.key_format = key_format
        self.opened_files = []

    def __enter__(self):
        return self.open_files()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close_files()

    def open_files(self):
        """Return [(key, (name, handle, mimetype)), ...] for every path."""
        self.close_files()
        files = []
        for index, entry in enumerate(self.paths):
            name = ''
            if isinstance(entry, tuple) and len(entry) >= 2:
                entry, name = entry[0], entry[1]
            if hasattr(entry, 'read'):
                # already a file-like object; prefer its own name if present
                handle = entry
                filename = handle.name if hasattr(handle, 'name') else name
            else:
                filename = entry
                handle = open(filename, 'rb')
            # every handle is tracked and closed by close_files()
            self.opened_files.append(handle)
            mimetype = mimetypes.MimeTypes().guess_type(filename)[0]
            files.append(
                (self.key_format.format(index),
                 ('file{}'.format(index), handle, mimetype))
            )
        return files

    def close_files(self):
        """Close every tracked handle and reset the tracking list."""
        for handle in self.opened_files:
            handle.close()
        self.opened_files = []
def json_dumps(*args, **kwargs):
    """Serialize to compact JSON, leaving non-ASCII characters unescaped."""
    compact_opts = {'separators': (',', ':'), 'ensure_ascii': False}
    # duplicate keyword arguments still raise TypeError, as before
    return json.dumps(*args, **kwargs, **compact_opts)
| |
import os
import sys
import time
import json
import struct
import numpy as np
from collections import OrderedDict
from ..memex_tools.sha1_tools import get_SHA1_from_file, get_SHA1_from_data
#from ..hasher import _hasher_obj_py as hop
class DictOutput():
    """Maps internal result-field names to the output JSON key casing.

    mode 'CamelCase' produces CamelCase keys; any other mode keeps the
    legacy lowercase/underscore keys (an identity mapping).
    """

    def __init__(self, mode='Old'):
        self.map = dict()
        if mode == 'CamelCase':
            self.fillDictCamelCase()
        else:
            self.fillDictOld()

    def fillDictCamelCase(self):
        """Fill the mapping with CamelCase output keys."""
        self.map.update({
            'images': "Images",
            'query_sha1': "QuerySha1",
            'similar_images': "SimilarImages",
            'ads_cdr_ids': "AdsCDRIds",
            'cdr_ids': "CDRIds",
            'distance': "Distance",
            'number': "Number",
            'sha1': "Sha1",
            'cached_image_urls': "CachedImageURLs",
        })

    def fillDictOld(self):
        """Fill the mapping with the legacy keys (identity mapping)."""
        for key in ('images', 'query_sha1', 'similar_images', 'ads_cdr_ids',
                    'cdr_ids', 'distance', 'number', 'sha1',
                    'cached_image_urls'):
            self.map[key] = key
class Searcher():
def __init__(self, global_conf_filename):
self.global_conf_filename = global_conf_filename
self.global_conf = json.load(open(global_conf_filename,'rt'))
self.read_conf()
self.init_indexer()
#self.init_ingester() # deprecated
self.needed_output_columns = ['info:s3_url']
def read_conf(self):
# these parameters may be overwritten by web call
self.features_dim = self.global_conf['FE_features_dim']
self.sim_limit = self.global_conf['SE_sim_limit']
self.near_dup = self.global_conf['SE_near_dup']
self.near_dup_th = self.global_conf['SE_near_dup_th']
self.get_dup = self.global_conf['SE_get_dup']
# this ratio could be dynamic to actually target a number of images to be retrieved.
self.ratio = self.global_conf['SE_ratio']
self.topfeature = 0
if "SE_topfeature" in self.global_conf:
self.topfeature = int(self.global_conf['SE_topfeature'])
self.out_dir = ""
if "SE_outdir" in self.global_conf:
self.out_dir = self.global_conf['SE_outdir']
from ..memex_tools.image_dl import mkpath
mkpath(self.out_dir)
def init_ingester(self):
""" Initialize `SE_ingester` from `global_conf['ingester']` value.
Currently supported ingester types are:
- mysql_ingester
- cdr_ingester
"""
field = 'SE_ingester'
if field not in self.global_conf:
raise ValueError("[Searcher: error] "+field+" is not defined in configuration file.")
if self.global_conf[field]=="mysql_ingester":
from ..ingester.mysql_ingester import MySQLIngester
self.ingester = MySQLIngester(self.global_conf_filename)
elif self.global_conf[field]=="cdr_ingester":
from ..ingester.cdr_ingester import CDRIngester
self.ingester = CDRIngester(self.global_conf_filename)
elif self.global_conf[field]=="hbase_ingester":
from ..ingester.hbase_ingester import HBaseIngester
self.ingester = HBaseIngester(self.global_conf_filename)
else:
raise ValueError("[Searcher: error] unkown 'ingester' {}.".format(self.global_conf[field]))
def init_indexer(self):
""" Initialize `indexer` from `global_conf['SE_indexer']` value.
Currently supported indexer types are:
- local_indexer
- hbase_indexer
"""
field = 'SE_indexer'
if field not in self.global_conf:
raise ValueError("[Searcher: error] "+field+" is not defined in configuration file.")
if self.global_conf[field]=="local_indexer":
from ..indexer.local_indexer import LocalIndexer
self.indexer = LocalIndexer(self.global_conf_filename)
elif self.global_conf[field]=="hbase_indexer":
from ..indexer.hbase_indexer import HBaseIndexer
self.indexer = HBaseIndexer(self.global_conf_filename)
else:
raise ValueError("[Searcher: error] unkown 'indexer' {}.".format(self.global_conf[field]))
    def check_ratio(self):
        '''Check if we need to set the ratio based on topfeature.'''
        # When a fixed number of top results is requested, derive the hashing
        # ratio from the total number of indexed images so that roughly
        # `topfeature` images are retrieved.
        if self.topfeature > 0:
            self.ratio = self.topfeature*1.0/len(self.indexer.sha1_featid_mapping)
            print "[Searcher.check_ratio: log] Set ratio to {} as we want top {} images out of {} indexed.".format(self.ratio, self.topfeature, len(self.indexer.sha1_featid_mapping))
def compute_features_listimgfiles(self, listimgfiles, search_id):
# we could switch between GPU and CPU based on number of images.
# TEMPORARY USE ONLY CPU as GPU card will be changed soon
#features_filename, ins_num = self.indexer.feature_extractor.compute_features(listimgfiles, search_id, 'CPU')
# device now set from conf file
features_filename, ins_num = self.indexer.feature_extractor.compute_features(listimgfiles, search_id)
if ins_num != len(listimgfiles):
print_err = "[Searcher.compute_features_listimgfiles: error] We did not get enough features ({}) from list of {} images."
raise ValueError(print_err.format(ins_num,len(listimgfiles)))
return features_filename
def filter_near_dup(self, nums, near_dup_th=None):
# nums is a list of ids then distances
# onum is the number of similar images
if not near_dup_th:
near_dup_th = self.near_dup_th
#print("[filter_near_dup] near_dup_th: {}".format(near_dup_th))
onum = len(nums)/2
temp_nums = []
#print "[Searcher.filter_near_dup: log] nums {}".format(nums)
for one_num in range(0,onum):
# maintain only near duplicates, i.e. distance less than self.near_dup_th
if float(nums[onum+one_num])>near_dup_th:
return temp_nums
# insert id at its right place
temp_nums.insert(one_num,nums[one_num])
# insert corresponding distance at the end
temp_nums.insert(len(temp_nums),nums[onum+one_num])
#print "[Searcher.filter_near_dup: log] temp_nums {}".format(temp_nums)
return temp_nums
    def read_sim(self, simname, nb_query, options_dict=dict()):
        """Parse a similarity result file into (sim, sim_score) lists.

        Each line of `simname` holds ids followed by distances, separated by
        spaces; at most `nb_query` lines are consumed. Near-duplicate
        filtering is applied depending on self.near_dup / options_dict.
        NOTE(review): `len(nums)/2` relies on Python 2 integer division;
        under Python 3 it yields a float and breaks min()/slicing - verify
        before porting.
        """
        # intialization
        sim = []
        sim_score = []
        # read similar images
        count = 0
        f = open(simname)
        for line in f:
            #sim_index.append([])
            nums = line.replace(' \n','').split(' ')
            #filter near duplicate here
            # options_dict["near_dup"] overrides the configured self.near_dup
            if (self.near_dup and "near_dup" not in options_dict) or ("near_dup" in options_dict and options_dict["near_dup"]):
                if "near_dup_th" in options_dict:
                    near_dup_th = options_dict["near_dup_th"]
                else:
                    near_dup_th = self.near_dup_th
                nums = self.filter_near_dup(nums, near_dup_th)
            #print nums
            onum = len(nums)/2
            n = min(self.sim_limit,onum)
            #print n
            if n==0: # no returned images, e.g. no near duplicate
                sim.append(())
                sim_score.append([])
                continue
            # get the needed_output_columns and sha1s
            sim_infos = self.indexer.get_sim_infos(nums[0:n], columns=self.needed_output_columns)
            # beware, need to make sure sim and sim_score are still aligned
            print("[read_sim] got {} sim_infos from {} samples".format(len(sim_infos), n))
            sim.append(sim_infos)
            sim_score.append(nums[onum:onum+n])
            count = count + 1
            if count == nb_query:
                break
        f.close()
        return sim,sim_score
    def read_sim_nodiskout(self, out_res, nb_query, options_dict=dict()):
        """Like read_sim, but consumes in-memory results instead of a file.

        NOTE(review): `nums` is built as a 2-element list [ids, dists], so
        the downstream len(nums)/2 logic sees onum == 1 - confirm the
        expected layout of `out_res` before trusting this path.
        """
        # intialization
        sim = []
        sim_score = []
        # read similar images
        count = 0
        for one_res in out_res:
            print("[read_sim_nodiskout: log] type(one_res): {}".format(type(one_res)))
            print("[read_sim_nodiskout: log] one_res: {}".format(one_res))
            # one_res would need to be a numpy array
            nums = [one_res[1::2], one_res[::2]]
            #filter near duplicate here
            if (self.near_dup and "near_dup" not in options_dict) or ("near_dup" in options_dict and options_dict["near_dup"]):
                if "near_dup_th" in options_dict:
                    near_dup_th = options_dict["near_dup_th"]
                else:
                    near_dup_th = self.near_dup_th
                nums = self.filter_near_dup(nums, near_dup_th)
            #print nums
            onum = len(nums)/2
            n = min(self.sim_limit,onum)
            #print n
            if n==0: # no returned images, e.g. no near duplicate
                sim.append(())
                sim_score.append([])
                continue
            # just get the sha1 at this point
            sim_infos = self.indexer.get_sim_infos(nums[0:n], columns=self.needed_output_columns)
            # beware, need to make sure sim and sim_score are still aligned
            print("[read_sim] got {} sim_infos from {} samples".format(len(sim_infos), n))
            sim.append(sim_infos)
            sim_score.append(nums[onum:onum+n])
            count = count + 1
            if count == nb_query:
                break
        return sim,sim_score
    def read_sim_sha1(self, simname, nb_query, options_dict=dict()):
        """Read similarity results where the ids are sha1 strings.

        Same structure as read_sim, but rows are fetched with
        get_columns_from_sha1_rows instead of get_sim_infos.
        NOTE(review): shares the Python 2 `len(nums)/2` integer-division
        dependency with read_sim.
        """
        # intialization
        sim = []
        sim_score = []
        # read similar images
        count = 0
        f = open(simname);
        for line in f:
            #sim_index.append([])
            nums = line.replace('\n','').split(' ')
            #filter near duplicate here
            if (self.near_dup and "near_dup" not in options_dict) or ("near_dup" in options_dict and options_dict["near_dup"]):
                if "near_dup_th" in options_dict:
                    near_dup_th = options_dict["near_dup_th"]
                else:
                    near_dup_th = self.near_dup_th
                nums = self.filter_near_dup(nums, near_dup_th)
            #print nums
            onum = len(nums)/2
            n = min(self.sim_limit,onum)
            #print n
            if n==0: # no returned images, e.g. no near duplicate
                sim.append(())
                sim_score.append([])
                continue
            # just get the sha1 at this point
            # beware, need to make sure sim and sim_score are still aligned
            #sim.append(self.indexer.get_full_sha1_rows(nums[0:n]))
            sim.append(self.indexer.get_columns_from_sha1_rows(nums[0:n], columns=self.needed_output_columns))
            sim_score.append(nums[onum:onum+n])
            count = count + 1
            if count == nb_query:
                break
        f.close()
        return sim,sim_score
    def build_output(self, nb_query, corrupted, list_sha1_id, sim, sim_score, options_dict=dict()):
        """Assemble the final per-query output structure.

        Produces one entry per query; corrupted queries get empty result
        lists. Only similar images that carry every needed output column are
        kept, and distances are re-aligned to the kept images.
        """
        # method that takes as input: nb_query, corrupted, list_sha1_id, sim, sim_score, options_dict=dict()
        #print "[Searcher.format_output: log] sim: {}".format(sim)
        # build final output
        # options_dict could be used to request more output infos 'cdr_ids' etc
        dec = 0
        output = []
        do = DictOutput()
        #needed_columns = ['info:s3_url', 'info:all_cdr_ids', 'info:all_parent_ids']
        for i in range(0,nb_query):
            output.append(dict())
            # `dec` counts corrupted queries so far, keeping sim/sim_score
            # (which only contain valid queries) aligned with output index i
            if i in corrupted:
                output[i][do.map['similar_images']] = OrderedDict([[do.map['number'],0],\
                                                           [do.map['sha1'],[]],\
                                                           [do.map['cached_image_urls'],[]],\
                                                           [do.map['cdr_ids'],[]],\
                                                           [do.map['ads_cdr_ids'],[]],\
                                                           [do.map['distance'],[]]])
                dec += 1
                continue
            ii = i - dec
            output[i][do.map['similar_images']] = OrderedDict([[do.map['number'],len(sim[ii])],\
                                                           [do.map['sha1'],[]],\
                                                           [do.map['cached_image_urls'],[]],\
                                                           [do.map['cdr_ids'],[]],\
                                                           [do.map['ads_cdr_ids'],[]],\
                                                           [do.map['distance'],[]]])
            output[i][do.map['query_sha1']] = list_sha1_id[ii]
            ok_sims = []
            # keep only hits that provide every needed output column
            for jj,simj in enumerate(sim[ii]):
                found_columns = [c in simj[1] for c in self.needed_output_columns]
                if found_columns.count(True) == len(self.needed_output_columns):
                    output[i][do.map['similar_images']][do.map['sha1']].append(simj[0].strip())
                    output[i][do.map['similar_images']][do.map['cached_image_urls']].append(simj[1]['info:s3_url'].strip())
                    #output[i]['similar_images']['cdr_ids'].append(simj[1]['info:all_cdr_ids'].strip())
                    #output[i]['similar_images']['ads_cdr_ids'].append(simj[1]['info:all_parent_ids'].strip())
                    ok_sims.append(jj)
                #else:
                #   print "[Searcher.format_output: log] Found invalid image: {}. found_columns: {}".format(simj[0],found_columns)
            output[i][do.map['similar_images']][do.map['distance']]=[sim_score[ii][jj] for jj in ok_sims]
        outp = OrderedDict([[do.map['number'],nb_query],[do.map['images'],output]])
        return outp
def build_error_output(self, nb_query, inst):
errors = dict()
errors['search'] = "[format_output ERROR] could not prepare output. Error was: {}".format(inst)
output = []
do = DictOutput()
outp = OrderedDict([[do.map['number'],nb_query],[do.map['images'],output],['errors',errors]])
return outp
    def format_output(self, simname, nb_query, corrupted, list_sha1_id, options_dict=dict()):
        """Read similarity results from `simname` and build the final output.

        Dispatches to read_sim_sha1 when options_dict['sha1_sim'] is truthy,
        otherwise to read_sim; falls back to build_error_output on failure.
        """
        # read hashing similarity results and get 'cached_image_urls', 'cdr_ids', 'ads_cdr_ids'
        print "[Searcher.format_output: log] options are: {}".format(options_dict)
        start_read_sim = time.time()
        if 'sha1_sim' in options_dict:
            sha1sim = options_dict['sha1_sim']
        else:
            sha1sim = False
        try:
            if sha1sim:
                sim,sim_score = self.read_sim_sha1(simname, nb_query, options_dict)
            else:
                sim,sim_score = self.read_sim(simname, nb_query, options_dict)
        except Exception as inst:
            print "[Searcher.format_output: error] {}".format(inst)
            return self.build_error_output(nb_query, inst)
        print "[Searcher.format_output: log] read_sim took: {}".format(time.time() - start_read_sim)
        start_build_output = time.time()
        outp = self.build_output(nb_query, corrupted, list_sha1_id, sim, sim_score, options_dict)
        print "[Searcher.format_output: log] build_output took: {}".format(time.time() - start_build_output)
        #print "[Searcher.format_output: log] output {}".format(output)
        return outp
    def format_output_nodiskout(self, out_res, nb_query, corrupted, list_sha1_id, options_dict=dict()):
        """Same as format_output, but reads similarity results from memory."""
        # read similarity results from memory
        print "[Searcher.format_output_nodiskout: log] options are: {}".format(options_dict)
        try:
            sim, sim_score = self.read_sim_nodiskout(out_res, nb_query, options_dict)
        except Exception as inst:
            return self.build_error_output(nb_query, inst)
        outp = self.build_output(nb_query, corrupted, list_sha1_id, sim, sim_score, options_dict)
        return outp
def search_one_imagepath(self,image_path):
# initialization
search_id = str(time.time())
all_img_filenames = [image_path]
return self.search_from_image_filenames(all_img_filenames,search_id)
    def search_image_filelist(self, image_list, options_dict=dict()):
        """Search from a file listing one image path/URL per line.

        URLs are downloaded first and replaced by local filenames; returns
        the output filename produced by search_from_image_filenames.
        """
        # initilization
        search_id = str(time.time())
        i = 0
        # read all images
        dl_images = []
        batch = []
        all_img_filenames = []
        for line in open(image_list):
            image_line = line.replace('\n','')
            # skip very short (e.g. empty) lines
            if len(image_line)>2:
                # Check if image or web address
                if image_line[0:4]=="http":
                    # Push image to be downloaded image
                    batch.append((i,image_line,None))
                    dl_images.append(i)
                all_img_filenames.append(image_line)
                i+=1
        #print "[Searcher.search_image_list: log] all_img_filenames: {}.".format(all_img_filenames)
        # download the images we need
        if batch:
            readable_images = self.indexer.image_downloader.download_images(batch, search_id)
            # replace each successfully downloaded URL by its local filename
            for i,img_tup in enumerate(readable_images):
                #print "[Searcher.search_image_list: log] {} readable image tuple {}.".format(i,img_tup)
                dl_pos = dl_images.index(img_tup[0])
                all_img_filenames[dl_images[dl_pos]]=img_tup[-1]
        #print "[Searcher.search_image_list: log] all_img_filenames: {}.".format(all_img_filenames)
        print("[search_image_filelist: log] options_dict: {}".format(options_dict))
        print "[search_image_filelist: log] Searching for {} query images.".format(all_img_filenames)
        if "no_diskout" in options_dict and options_dict["no_diskout"]:
            # print("[search_image_filelist: log] using no_diskout")
            # outp, outputname = self.search_from_image_filenames_nodiskout(all_img_filenames, search_id, options_dict)
            print("[search_image_filelist: log] using no_diskout is not yet fully supported. calling search_from_image_filenames anyway")
            outp, outputname = self.search_from_image_filenames(all_img_filenames, search_id, options_dict)
        else:
            outp, outputname = self.search_from_image_filenames(all_img_filenames, search_id, options_dict)
        return outputname
    def search_image_list(self, image_list, options_dict=dict()):
        """Search from an in-memory iterable of image paths/URLs.

        URLs are downloaded first; returns the formatted output dict, or an
        error dict when no image could be downloaded.
        """
        # initilization
        start_search = time.time()
        search_id = str(start_search)
        i = 0
        # read all images
        dl_images = []
        batch = []
        all_img_filenames = []
        for line in image_list:
            image_line = line.replace('\n','')
            # skip very short (e.g. empty) lines
            if len(image_line)>2:
                # Check if image or web address
                if image_line[0:4]=="http":
                    # Push image to be downloaded image
                    batch.append((i,image_line,None))
                    dl_images.append(i)
                all_img_filenames.append(image_line)
                i+=1
        #print "[Searcher.search_image_list: log] all_img_filenames: {}.".format(all_img_filenames)
        # download the images we need
        if batch:
            readable_images = self.indexer.image_downloader.download_images(batch, search_id)
            if not readable_images:
                return {'error': 'could not download any image.'}
            # replace each successfully downloaded URL by its local filename
            for i,img_tup in enumerate(readable_images):
                #print "[Searcher.search_image_list: log] {} readable image tuple {}.".format(i,img_tup)
                dl_pos = dl_images.index(img_tup[0])
                all_img_filenames[dl_images[dl_pos]]=img_tup[-1]
        #print "[Searcher.search_image_list: log] all_img_filenames: {}.".format(all_img_filenames)
        print "[Searcher.search_image_filelist: log] Search prepared in {}s.".format(time.time() - start_search)
        #outp, outputname = self.search_from_image_filenames(all_img_filenames, search_id, options_dict)
        print "[Searcher.search_image_filelist: log] options_dict: {}".format(options_dict)
        # NOTE(review): unlike search_image_filelist, the no_diskout value is
        # not checked for truthiness here, only key presence - verify
        if "no_diskout" in options_dict:
            print("[search_image_filelist: log] using no_diskout")
            outp, outputname = self.search_from_image_filenames_nodiskout(all_img_filenames, search_id, options_dict)
        else:
            outp, outputname = self.search_from_image_filenames(all_img_filenames, search_id, options_dict)
        return outp
def search_from_image_filenames_nocache(self, all_img_filenames, search_id, options_dict=dict()):
corrupted = []
valid_img_filenames = []
valid_img = []
list_sha1_id = []
outp = {}
for i, image_name in enumerate(all_img_filenames):
if image_name[0:4]!="http":
sha1 = get_SHA1_from_file(image_name)
if sha1:
list_sha1_id.append(sha1)
valid_img.append((i,sha1,image_name))
valid_img_filenames.append(image_name)
else:
corrupted.append(i)
else: # we did not manage to download image
# need to deal with that in output formatting too
corrupted.append(i)
if valid_img_filenames:
features_filename = self.compute_features_listimgfiles(valid_img_filenames, search_id)
#features_filename, ins_num = self.indexer.feature_extractor.compute_features(valid_img_filenames, search_id)
#if ins_num!=len(valid_img_filenames):
# raise ValueError("[Searcher.search_from_image_filenames_nocache: error] We did not get enough features ({}) from list of {} images.".format(ins_num,len(new_files)))
# query with features_filename
self.check_ratio()
simname = self.indexer.hasher.get_similar_images_from_featuresfile(str(features_filename), self.ratio)
outp = self.format_output(simname, len(all_img_filenames), corrupted, list_sha1_id, options_dict)
# cleanup
os.remove(simname)
return outp
def search_from_image_filenames(self, all_img_filenames, search_id, options_dict=dict()):
# compute all sha1s
start_search = time.time()
corrupted = []
list_sha1_id = []
valid_images = []
for i,image_name in enumerate(all_img_filenames):
if image_name[0:4]!="http":
sha1 = get_SHA1_from_file(image_name)
if sha1:
list_sha1_id.append(sha1)
valid_images.append((i,sha1,image_name))
else:
print("[Searcher.search_from_image_filenames: log] image {} is corrupted.".format(image_name))
corrupted.append(i)
else: # we did not manage to download image
# need to deal with that in output formatting too
corrupted.append(i)
#print "[Searcher.search_from_image_filenames: log] valid_images {}".format(valid_images)
# get indexed images
list_ids_sha1_found = self.indexer.get_ids_from_sha1s(list_sha1_id)
print "[Searcher.search_from_image_filenames: log] list_ids_sha1_found {}".format(list_ids_sha1_found)
tmp_list_ids_found = [x[0] for x in list_ids_sha1_found if x[0] is not None]
list_sha1_found = [x[1] for x in list_ids_sha1_found if x[0] is not None]
#print "[Searcher.search_from_image_filenames: log] list_sha1_id {}".format(list_sha1_id)
#print "[Searcher.search_from_image_filenames: log] list_sha1_found {}".format(list_sha1_found)
# this is to keep proper ordering
list_ids_found = [tmp_list_ids_found[list_sha1_found.index(sha1)] for sha1 in list_sha1_id if sha1 in list_sha1_found]
#print "[Searcher.search_from_image_filenames: log] tmp_list_ids_found {}".format(tmp_list_ids_found)
print "[Searcher.search_from_image_filenames: log] list_ids_found {}".format(list_ids_found)
if list_ids_found:
# get the features, hasher starts to count at 1
feats,ok_ids = self.indexer.hasher.get_precomp_feats([x+1 for x in list_ids_found])
if len(ok_ids)!=len(list_ids_found):
raise ValueError("[Searcher.search_from_image_filenames: error] We did not get enough precomputed features ({}) from list of {} images.".format(len(ok_ids),len(list_ids_found)))
# compute new images features
not_indexed_sha1 = set(list_sha1_id)-set(list_sha1_found)
#res = self.indexer.get_precomp_from_sha1(list_ids_sha1_found)
new_files = []
all_valid_images = []
precomp_img_filenames=[]
for i,sha1,image_name in valid_images:
if sha1 in list_sha1_found: # image is indexed
precomp_img_filenames.append(image_name)
else:
new_files.append(image_name)
all_valid_images.append(all_img_filenames[i])
# check images are jpeg (and convert them here?)
print "[Searcher.search_from_image_filenames: log] all_valid_images {}".format(all_valid_images)
print "[Searcher.search_from_image_filenames: log] new_files {}".format(new_files)
features_filename = self.compute_features_listimgfiles(new_files, search_id)
#features_filename,ins_num = self.indexer.feature_extractor.compute_features(new_files,search_id)
#if ins_num!=len(new_files):
# raise ValueError("[Searcher.search_from_image_filenames: error] We did not get enough features ({}) from list of {} images.".format(ins_num,len(new_files)))
# merge feats with features_filename
# TODO: prefix with path
final_featuresfile = str(os.path.join(self.out_dir, search_id+'.dat'))
read_dim = self.features_dim*4
read_type = np.float32
features_wrote = 0
#print "[Searcher.search_from_image_filenames: log] feats {}".format(feats)
with open(features_filename,'rb') as new_feats, open(final_featuresfile,'wb') as out:
for image_name in all_valid_images:
#print "[Searcher.search_from_image_filenames: log] saving feature of image {}".format(image_name)
if image_name in precomp_img_filenames:
# select precomputed
precomp_pos = precomp_img_filenames.index(image_name)
#print "[Searcher.search_from_image_filenames: log] getting precomputed feature at position {}".format(precomp_pos)
tmp_feat = feats[precomp_pos][:]
else:
# read from new feats
tmp_feat = np.frombuffer(new_feats.read(read_dim),dtype=read_type)
print "[Searcher.search_from_image_filenames: log] tmp_feat for image {} has norm {} and is: {}".format(image_name,np.linalg.norm(tmp_feat),tmp_feat)
out.write(tmp_feat)
features_wrote += 1
print "[Searcher.search_from_image_filenames: log] Search prepared in {}s".format(time.time() - start_search)
if features_wrote:
if "near_dup_th" in options_dict:
near_dup_th = options_dict["near_dup_th"]
else:
near_dup_th = self.near_dup_th
# Compute ratio from topfeature if set
self.check_ratio()
simname = self.indexer.hasher.get_similar_images_from_featuresfile(final_featuresfile, self.ratio, near_dup_th=float(near_dup_th))
outputname = simname[:-4]+".json"
start_format = time.time()
outp = self.format_output(simname, len(all_img_filenames), corrupted, list_sha1_id, options_dict)
print "[Searcher.search_from_image_filenames: log] Formatting done in {}s".format(time.time() - start_format)
print "[Searcher.search_from_image_filenames: log] saving output to {}".format(outputname)
json.dump(outp, open(outputname,'w'), indent=4, sort_keys=False)
print "[Searcher.search_from_image_filenames: log] Search done in {}s".format(time.time() - start_search)
return outp, outputname
def search_from_listid_get_simname(self, list_ids_sha1, search_id, check_already_computed=False):
# list_ids_sha1 will be list of tuples (integer_id, sha1)
final_featuresfile = search_id+'.dat'
# sanity check
nb_imgs = len(self.indexer.sha1_featid_mapping)
valid_ids_sha1 = []
corrupted = []
for img_id,sha1 in list_ids_sha1:
if img_id < nb_imgs:
if sha1 != self.indexer.sha1_featid_mapping[img_id]:
print "[Searcher.search_from_listid_get_simname: error] misaligned image {} vs. {} id {}".format(sha1, self.indexer.sha1_featid_mapping[img_id], img_id)
try:
sha1_pos = self.indexer.sha1_featid_mapping.index(sha1)
print "[Searcher.search_from_listid_get_simname: info] image {} can actually be found at {}".format(sha1, sha1_pos)
except Exception:
print "[Searcher.search_from_listid_get_simname: info] image {} cannot be found in index.".format(sha1)
corrupted.append(sha1)
else:
valid_ids_sha1.append((img_id, sha1))
else:
print "[Searcher.search_from_listid_get_simname] trying to access image {} when searching image {} while we have only {} images".format(img_id, sha1, nb_imgs)
corrupted.append(sha1)
if check_already_computed:
simname = final_featuresfile[:-4] + '-sim_'+str(self.ratio)+'.txt'
if os.path.isfile(simname):
print "[Searcher.search_from_listid_get_simname: log] found already existing output. Returning it."
# we could check we have len(valid_ids_sha1) lines, but the renaming to sim_ratio happens when the search is done...
return simname, corrupted
if valid_ids_sha1:
# get the features, hasher starts to count at 1
feats, ok_ids = self.indexer.hasher.get_precomp_feats([x[0]+1 for x in valid_ids_sha1])
if len(ok_ids) != len(valid_ids_sha1):
raise ValueError("[Searcher.search_from_sha1_list_get_simname: error] We did not get enough precomputed features ({}) from list of {} images.".format(len(ok_ids),len(list_ids_found)))
print "[Searcher.search_from_listid_get_simname: log] writing {} features to {}".format(len(valid_ids_sha1), final_featuresfile)
read_dim = self.features_dim*4
read_type = np.float32
features_wrote = 0
#print "[Searcher.search_from_image_filenames: log] feats {}".format(feats)
with open(final_featuresfile,'wb') as out:
for precomp_pos,img_id in enumerate(valid_ids_sha1):
tmp_feat = feats[precomp_pos][:]
#print "[Searcher.search_from_sha1_list_get_simname: log] tmp_feat for image {} has norm {} and is: {}".format(img_id, np.linalg.norm(tmp_feat), tmp_feat)
out.write(tmp_feat)
features_wrote += 1
if features_wrote:
# query with merged features_filename
print "[Searcher.search_from_listid_get_simname: log] searching for similar images from features file {}".format(final_featuresfile)
self.check_ratio()
simname = self.indexer.hasher.get_similar_images_from_featuresfile(final_featuresfile, self.ratio, near_dup_th=float(self.near_dup_th))
else:
print "[Searcher.search_from_listid_get_simname: log] no features to search for similar images."
simname = None
return simname, corrupted
def search_from_sha1_list_get_simname(self, all_img_sha1s, search_id):
# get indexed images
list_ids_sha1_found = self.indexer.get_ids_from_sha1s(all_img_sha1s)
tmp_list_ids_found = [x[0] for x in list_ids_sha1_found if x[0] is not None]
list_sha1_found = [x[1] for x in list_ids_sha1_found if x[0] is not None]
# this is to keep proper ordering
list_ids_found = [tmp_list_ids_found[list_sha1_found.index(sha1)] for sha1 in all_img_sha1s if sha1 in list_sha1_found]
#print "[Searcher.search_from_sha1_list: log] tmp_list_ids_found {}".format(tmp_list_ids_found)
#print "[Searcher.search_from_sha1_list_get_simname: log] list_ids_found {}".format(list_ids_found)
if list_ids_found:
# get the features, hasher starts to count at 1
feats, ok_ids = self.indexer.hasher.get_precomp_feats([x+1 for x in list_ids_found])
if len(ok_ids) != len(list_ids_found):
raise ValueError("[Searcher.search_from_sha1_list_get_simname: error] We did not get enough precomputed features ({}) from list of {} images.".format(len(ok_ids),len(list_ids_found)))
# this should not be empty
corrupted = list(set(all_img_sha1s)-set(list_sha1_found))
if corrupted:
print "[Searcher.search_from_sha1_list_get_simname: log] some sha1s were not found: {}".format(corrupted)
final_featuresfile = search_id+'.dat'
read_dim = self.features_dim*4
read_type = np.float32
features_wrote = 0
#print "[Searcher.search_from_image_filenames: log] feats {}".format(feats)
with open(final_featuresfile,'wb') as out:
for precomp_pos,img_id in enumerate(list_ids_found):
tmp_feat = feats[precomp_pos][:]
#print "[Searcher.search_from_sha1_list_get_simname: log] tmp_feat for image {} has norm {} and is: {}".format(img_id, np.linalg.norm(tmp_feat), tmp_feat)
out.write(tmp_feat)
features_wrote += 1
if features_wrote:
# query with merged features_filename
self.check_ratio()
simname = self.indexer.hasher.get_similar_images_from_featuresfile(final_featuresfile, self.ratio)
else:
simname = None
return simname, corrupted
def search_from_sha1_list(self, all_img_sha1s, search_id, options_dict=dict()):
# compute all sha1s
start_search = time.time()
simname, corrupted = self.search_from_sha1_list_get_simname(all_img_sha1s, search_id)
print "[Searcher.search_from_sha1_list: log] Search prepared in {}s".format(time.time() - start_search)
if simname is not None:
outputname = simname[:-4]+".json"
start_format = time.time()
outp = self.format_output(simname, len(all_img_sha1s), corrupted, all_img_sha1s, options_dict)
print "[Searcher.search_from_sha1_list: log] Formatting done in {}s".format(time.time() - start_format)
print "[Searcher.search_from_sha1_list: log] saving output to {}".format(outputname)
json.dump(outp, open(outputname,'w'), indent=4, sort_keys=False)
print "[Searcher.search_from_sha1_list: log] Search done in {}s".format(time.time() - start_search)
else:
# prepare dummy output?
outp = None
outputname = None
return outp, outputname
# # this is not yet working.
# def search_from_image_filenames_nodiskout(self, all_img_filenames, search_id, options_dict=dict()):
# # compute all sha1s
# start_search = time.time()
# corrupted = []
# list_sha1_id = []
# valid_images = []
# for i,image_name in enumerate(all_img_filenames):
# if image_name[0:4]!="http":
# sha1 = get_SHA1_from_file(image_name)
# if sha1:
# list_sha1_id.append(sha1)
# valid_images.append((i,sha1,image_name))
# else:
# print("[Searcher.search_from_image_filenames_nodiskout: log] image {} is corrupted.".format(image_name))
# corrupted.append(i)
# else: # we did not manage to download image
# # need to deal with that in output formatting too
# corrupted.append(i)
# #print "[Searcher.search_from_image_filenames: log] valid_images {}".format(valid_images)
# # get indexed images
# list_ids_sha1_found = self.indexer.get_ids_from_sha1s(list_sha1_id)
# print "[Searcher.search_from_image_filenames_nodiskout: log] list_ids_sha1_found {}".format(list_ids_sha1_found)
# tmp_list_ids_found = [x[0] for x in list_ids_sha1_found if x[0] is not None]
# list_sha1_found = [x[1] for x in list_ids_sha1_found if x[0] is not None]
# #print "[Searcher.search_from_image_filenames: log] list_sha1_id {}".format(list_sha1_id)
# #print "[Searcher.search_from_image_filenames: log] list_sha1_found {}".format(list_sha1_found)
# # this is to keep proper ordering
# list_ids_found = [tmp_list_ids_found[list_sha1_found.index(sha1)] for sha1 in list_sha1_id if sha1 in list_sha1_found]
# #print "[Searcher.search_from_image_filenames: log] tmp_list_ids_found {}".format(tmp_list_ids_found)
# print "[Searcher.search_from_image_filenames_nodiskout: log] list_ids_found {}".format(list_ids_found)
# if list_ids_found:
# # get the features, hasher starts to count at 1
# feats,ok_ids = self.indexer.hasher.get_precomp_feats([x+1 for x in list_ids_found])
# if len(ok_ids)!=len(list_ids_found):
# raise ValueError("[Searcher.search_from_image_filenames_nodiskout: error] We did not get enough precomputed features ({}) from list of {} images.".format(len(ok_ids),len(list_ids_found)))
# # compute new images features
# not_indexed_sha1 = set(list_sha1_id)-set(list_sha1_found)
# #res = self.indexer.get_precomp_from_sha1(list_ids_sha1_found)
# new_files = []
# all_valid_images = []
# precomp_img_filenames=[]
# for i,sha1,image_name in valid_images:
# if sha1 in list_sha1_found: # image is indexed
# precomp_img_filenames.append(image_name)
# else:
# new_files.append(image_name)
# all_valid_images.append(all_img_filenames[i])
# # check images are jpeg (and convert them here?)
# print "[Searcher.search_from_image_filenames_nodiskout: log] all_valid_images {}".format(all_valid_images)
# print "[Searcher.search_from_image_filenames_nodiskout: log] new_files {}".format(new_files)
# features_filename = self.compute_features_listimgfiles(new_files, search_id)
# #features_filename,ins_num = self.indexer.feature_extractor.compute_features(new_files,search_id)
# #if ins_num!=len(new_files):
# # raise ValueError("[Searcher.search_from_image_filenames: error] We did not get enough features ({}) from list of {} images.".format(ins_num,len(new_files)))
# # merge feats with features_filename
# final_featuresfile = search_id+'.dat'
# read_dim = self.features_dim*4
# read_type = np.float32
# features_wrote = 0
# #print "[Searcher.search_from_image_filenames: log] feats {}".format(feats)
# with open(features_filename,'rb') as new_feats, open(final_featuresfile,'wb') as out:
# for image_name in all_valid_images:
# #print "[Searcher.search_from_image_filenames: log] saving feature of image {}".format(image_name)
# if image_name in precomp_img_filenames:
# # select precomputed
# precomp_pos = precomp_img_filenames.index(image_name)
# #print "[Searcher.search_from_image_filenames: log] getting precomputed feature at position {}".format(precomp_pos)
# tmp_feat = feats[precomp_pos][:]
# else:
# # read from new feats
# tmp_feat = np.frombuffer(new_feats.read(read_dim),dtype=read_type)
# print "[Searcher.search_from_image_filenames_nodiskout: log] tmp_feat for image {} has norm {} and is: {}".format(image_name,np.linalg.norm(tmp_feat),tmp_feat)
# out.write(tmp_feat)
# features_wrote += 1
# print "[Searcher.search_from_image_filenames_nodiskout: log] Search prepared in {}s".format(time.time() - start_search)
# if features_wrote:
# # how to properly interact with out_res?
# out_res = hop.ResVector(self.indexer.hasher.get_similar_images_from_featuresfile_nodiskout(final_featuresfile, self.ratio))
# start_format = time.time()
# outp = self.format_output_nodiskout(out_res, len(all_img_filenames), corrupted, list_sha1_id, options_dict)
# print "[Searcher.search_from_image_filenames_nodiskout: log] Formatting done in {}s".format(time.time() - start_format)
# outputname = str(search_id)+"-sim.json"
# print "[Searcher.search_from_image_filenames_nodiskout: log] Saving output to {}".format(outputname)
# json.dump(outp, open(outputname,'w'), indent=4, sort_keys=False)
# print "[Searcher.search_from_image_filenames_nodiskout: log] Search done in {}s".format(time.time() - start_search)
# return outp, outputname
| |
from django.contrib.syndication import views
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone
from .models import Article, Entry
class TestRss2Feed(views.Feed):
    """Base RSS 2.0 test feed: static feed-level metadata plus per-item
    hooks backed by the Entry model."""
    title = 'My blog'
    description = 'A more thorough description of my blog.'
    link = '/blog/'
    feed_guid = '/foo/bar/1234'
    author_name = 'Sally Smith'
    author_email = 'test@example.com'
    author_link = 'http://www.example.com/'
    categories = ('python', 'django')
    feed_copyright = 'Copyright (c) 2007, Sally Smith'
    ttl = 600
    def items(self):
        # Feed items: every Entry in the database.
        return Entry.objects.all()
    def item_description(self, item):
        # Per-item description derived from the item's string representation.
        return "Overridden description: %s" % item
    def item_pubdate(self, item):
        return item.published
    def item_updateddate(self, item):
        return item.updated
    # Static per-item metadata (same value for every entry).
    item_author_name = 'Sally Smith'
    item_author_email = 'test@example.com'
    item_author_link = 'http://www.example.com/'
    item_categories = ('python', 'testing')
    item_copyright = 'Copyright (c) 2007, Sally Smith'
class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed):
    """Feed whose item GUIDs are flagged as permalinks."""
    def item_guid_is_permalink(self, item):
        # Every item's GUID doubles as its permanent URL.
        return True
class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed):
    """Feed using the item's primary key as a GUID that is not a permalink."""
    def item_guid(self, item):
        return str(item.pk)
    def item_guid_is_permalink(self, item):
        return False
class TestRss091Feed(TestRss2Feed):
    """Same content as TestRss2Feed, rendered as RSS 0.91 (Userland)."""
    feed_type = feedgenerator.RssUserland091Feed
class TestNoPubdateFeed(views.Feed):
    """Minimal feed defining no item_pubdate/item_updateddate hooks."""
    title = 'Test feed'
    link = '/feed/'
    def items(self):
        return Entry.objects.all()
class TestAtomFeed(TestRss2Feed):
    """Same content as TestRss2Feed, rendered as Atom 1.0."""
    feed_type = feedgenerator.Atom1Feed
    # Atom uses `subtitle` where RSS uses `description`.
    subtitle = TestRss2Feed.description
class TestLatestFeed(TestRss2Feed):
    """
    A feed where the latest entry date is an `updated` element.
    """
    feed_type = feedgenerator.Atom1Feed
    subtitle = TestRss2Feed.description
    def items(self):
        # Exclude entry 5 so a different item drives the feed-level dates.
        # NOTE(review): assumes pk=5 is meaningful in the test fixtures —
        # confirm against the test data.
        return Entry.objects.exclude(pk=5)
class ArticlesFeed(TestRss2Feed):
    """
    A feed to test no link being defined. Articles have no get_absolute_url()
    method, and item_link() is not defined.
    """
    def items(self):
        # Uses Article (not Entry) precisely because it defines no URL.
        return Article.objects.all()
class TestSingleEnclosureRSSFeed(TestRss2Feed):
    """
    A feed to test that RSS feeds work with a single enclosure.
    """
    def item_enclosure_url(self, item):
        return 'http://example.com'
    def item_enclosure_size(self, item):
        # Enclosure length; zero keeps the fixture trivial.
        return 0
    def item_mime_type(self, item):
        return 'image/png'
class TestMultipleEnclosureRSSFeed(TestRss2Feed):
    """
    A feed to test that RSS feeds raise an exception with multiple enclosures.
    """
    def item_enclosures(self, item):
        # Two enclosures per item; per the class docstring, the RSS
        # generator is expected to reject this.
        return [
            feedgenerator.Enclosure('http://example.com/hello.png', 0, 'image/png'),
            feedgenerator.Enclosure('http://example.com/goodbye.png', 0, 'image/png'),
        ]
class TemplateFeed(TestRss2Feed):
    """
    A feed to test defining item titles and descriptions with templates.
    """
    title_template = 'syndication/title.html'
    description_template = 'syndication/description.html'
    # Defining a template overrides any item_title definition
    def item_title(self):
        # Should never be rendered: title_template takes precedence.
        return "Not in a template"
class TemplateContextFeed(TestRss2Feed):
    """Feed that injects extra variables into the title/description
    template context."""
    title_template = 'syndication/title_context.html'
    description_template = 'syndication/description_context.html'
    def get_context_data(self, **kwargs):
        # Extend the inherited template context with a custom variable.
        ctx = super().get_context_data(**kwargs)
        ctx.update(foo='bar')
        return ctx
class TestLanguageFeed(TestRss2Feed):
    """Feed with an explicit language code."""
    language = 'de'
class NaiveDatesFeed(TestAtomFeed):
    """
    A feed with naive (non-timezone-aware) dates.
    """
    def item_pubdate(self, item):
        # Returned as stored; presumably Entry.published is naive here —
        # confirm against the model/fixtures.
        return item.published
class TZAwareDatesFeed(TestAtomFeed):
    """
    A feed with timezone-aware dates.
    """
    def item_pubdate(self, item):
        # Provide a weird offset so that the test can know it's getting this
        # specific offset and not accidentally getting one from
        # settings.TIME_ZONE.
        return item.published.replace(tzinfo=get_fixed_timezone(42))
class TestFeedUrlFeed(TestAtomFeed):
    """Feed with a hard-coded feed_url instead of a request-derived one."""
    feed_url = 'http://example.com/customfeedurl/'
class MyCustomAtom1Feed(feedgenerator.Atom1Feed):
    """Custom Atom 1.0 generator used to verify that the subclass hooks
    (root/item attributes and elements) are all honored."""
    def root_attributes(self):
        # Tag the feed root element with an extra attribute.
        attrs = super().root_attributes()
        attrs.update(django='rocks')
        return attrs
    def add_root_elements(self, handler):
        # Emit one extra root-level element after the standard ones.
        super().add_root_elements(handler)
        handler.addQuickElement('spam', 'eggs')
    def item_attributes(self, item):
        # Every item element gets an extra attribute as well.
        attrs = super().item_attributes(item)
        attrs.update(bacon='yum')
        return attrs
    def add_item_elements(self, handler, item):
        # And one extra child element per item.
        super().add_item_elements(handler, item)
        handler.addQuickElement('ministry', 'silly walks')
class TestCustomFeed(TestAtomFeed):
    """Atom feed rendered with the custom generator class MyCustomAtom1Feed."""
    feed_type = MyCustomAtom1Feed
class TestSingleEnclosureAtomFeed(TestAtomFeed):
    """
    A feed to test that Atom feeds work with a single enclosure.
    """
    def item_enclosure_url(self, item):
        return 'http://example.com'
    def item_enclosure_size(self, item):
        # Enclosure length; zero keeps the fixture trivial.
        return 0
    def item_mime_type(self, item):
        return 'image/png'
class TestMultipleEnclosureAtomFeed(TestAtomFeed):
    """
    A feed to test that Atom feeds work with multiple enclosures.
    """
    def item_enclosures(self, item):
        # Note: lengths are strings here ('0'), unlike the RSS fixture above.
        return [
            feedgenerator.Enclosure('http://example.com/hello.png', '0', 'image/png'),
            feedgenerator.Enclosure('http://example.com/goodbye.png', '0', 'image/png'),
        ]
| |
# Copyright (C) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Burrow command line client and shell.
'''
from __future__ import print_function
import optparse
import os
import pwd
import sys
import types
import burrow
from burrow.openstack.common.gettextutils import _
class Shell(object):
    '''Shell session class: parses command line options and dispatches
    burrow client commands, either from arguments or interactively.'''

    # Command metadata grouped by scope. Each section declares whether its
    # commands take the account option, whether they accept filters, and
    # which positional arguments they require.
    sections = [
        dict(name='Global',
             filters=True,
             args=[],
             commands=['delete_accounts', 'get_accounts']),
        dict(name='Account',
             account=True,
             filters=True,
             args=[],
             commands=['delete_queues', 'get_queues']),
        dict(name='Queue',
             account=True,
             filters=True,
             args=['queue'],
             commands=['delete_messages', 'get_messages', 'update_messages']),
        dict(name='Message',
             account=True,
             args=['queue', 'message'],
             commands=[
                 'create_message',
                 'delete_message',
                 'get_message',
                 'update_message'])]

    # Commands that accept message attribute options (--ttl, --hide).
    attribute_commands = [
        'update_messages',
        'create_message',
        'update_message']

    # Commands that read the message body from stdin.
    stdin_commands = ['create_message']

    def __init__(self):
        '''Build the option parser, parse sys.argv, and create the client.'''
        self.parser = optparse.OptionParser(usage='burrow [options] <command>',
            version=burrow.__version__)
        self.parser.add_option('-c', '--commands', action='store_true',
            help=_('Print help for the available commands'))
        self.parser.add_option('-u', '--url', default='http://localhost:8080',
            help=_('Backend URL to use'))
        # Use ~/.burrowrc as the default config file if it exists.
        rcfile = os.path.expanduser('~')
        rcfile = os.path.join(rcfile, '.burrowrc')
        if not os.path.exists(rcfile):
            rcfile = None
        self.parser.add_option('-f', '--files', default=rcfile,
            help=_('Configuration file(s) to use (comma separated)'))
        # Default account is the current system user name.
        user = pwd.getpwuid(os.getuid())[0]
        self.parser.add_option('-a', '--account', default=user,
            help=_('Account to use for queue and message commands'))
        self.parser.add_option('-w', '--wait',
            help=_('Number of seconds to wait if no messages match'))
        attributes = optparse.OptionGroup(self.parser,
            _('Messages attribute options'))
        attributes.add_option('-t', '--ttl',
            help=_('TTL attribute in seconds to set for message(s)'))
        attributes.add_option('-H', '--hide',
            help=_('Hidden time attribute in seconds to set for message(s)'))
        self.parser.add_option_group(attributes)
        filters = optparse.OptionGroup(self.parser, _('Filtering options'))
        filters.add_option('-l', '--limit',
            help=_('Limit the number of messages to match'))
        filters.add_option('-m', '--marker',
            help=_('Only match messages that were inserted after this id'))
        filters.add_option('-A', '--all', action='store_true',
            help=_('Match all messages, including those that are hidden'))
        choices = ['none', 'id', 'attributes', 'all']
        filters.add_option('-d', '--detail', type='choice', choices=choices,
            help=_('What message information to return. Options are: %s') %
            ', '.join(choices))
        self.parser.add_option_group(filters)
        (self.options, self.args) = self.parser.parse_args()
        if self.options.commands:
            self.print_help()
            sys.exit(1)
        if self.options.files is None:
            files = []
        else:
            # NOTE(review): this splits on ', ' (comma + space) although the
            # help text says "comma separated" — confirm the intended format.
            files = self.options.files.split(', ')
        self.client = burrow.Client(url=self.options.url, config_files=files)

    def run(self):
        '''Run the command given in arguments or enter an interactive shell.'''
        if len(self.args) == 0:
            # Interactive mode: read commands until EOF/interrupt.
            for command in self._get_command():
                try:
                    self.run_command(command[0], command[1:])
                except burrow.NotFound as exception:
                    print(exception)
        else:
            self.run_command(self.args[0], self.args[1:])

    def _get_command(self):
        '''Get a command from stdin, printing a prompt out if stdin
        is attached to a TTY.'''
        prompt = ''
        if os.isatty(sys.stdin.fileno()):
            prompt = 'burrow> '
            try:
                # Try importing readline to make raw_input functionality more
                # user friendly.
                import readline  # flake8: noqa
            except ImportError:
                pass
        while True:
            try:
                # NOTE(review): raw_input is Python 2 only; this module still
                # targets Python 2 despite the print_function import.
                command = raw_input(prompt)
            except KeyboardInterrupt:
                break
            except EOFError:
                if os.isatty(sys.stdin.fileno()):
                    # Emit a final newline so the next shell prompt starts
                    # cleanly. Fixed: this was a bare `print`, which is a
                    # no-op (not a call) under `from __future__ import
                    # print_function`.
                    print()
                break
            command = command.split()
            if len(command) == 0:
                continue
            if command[0] == 'help':
                self.print_help(print_options_help=False)
                continue
            if command[0] == 'exit' or command[0] == 'quit':
                break
            yield command

    def run_command(self, command, args):
        '''Try running a command with the given arguments.'''
        section = self._get_section(command)
        if section is None:
            print(_('Command not found: %s') % command)
            return
        if len(args) != len(section['args']):
            # Build a usage string from the expected arguments.
            for arg in section['args']:
                command += ' <%s>' % arg
            print(_('Wrong number of arguments: %s') % command)
            return
        # Assemble the positional arguments the client method expects.
        if section.get('account', None):
            args.insert(0, self.options.account)
        if command in self.stdin_commands:
            args.append(sys.stdin.read())
        if command in self.attribute_commands:
            args.append(self._pack_attributes())
        if section.get('filters', None):
            args.append(self._pack_filters())
        result = getattr(self.client, command)(*args)
        self._print_result(result)

    def _get_section(self, command):
        '''Lookup command in the defined command sections.'''
        for section in self.sections:
            if command in section['commands']:
                return section
        return None

    def _pack_attributes(self):
        '''Pack attributes given in command line options.'''
        attributes = {}
        if self.options.ttl is not None:
            attributes['ttl'] = self.options.ttl
        if self.options.hide is not None:
            attributes['hide'] = self.options.hide
        return attributes

    def _pack_filters(self):
        '''Pack filters given in command line options.'''
        filters = {}
        if self.options.limit is not None:
            filters['limit'] = self.options.limit
        if self.options.marker is not None:
            filters['marker'] = self.options.marker
        if self.options.all is not None:
            filters['match_hidden'] = self.options.all
        if self.options.detail is not None:
            filters['detail'] = self.options.detail
        return filters

    def _print_result(self, result):
        '''Format and print the result.'''
        if isinstance(result, list) or isinstance(result, types.GeneratorType):
            for item in result:
                if isinstance(item, dict):
                    self._print_message(item)
                else:
                    print(item)
        elif isinstance(result, dict):
            self._print_message(result)
        elif result is not None:
            print(result)

    def _print_message(self, item):
        '''Format and print message.'''
        print('id =', item['id'])
        # items() instead of the Python-2-only iteritems(): identical
        # iteration behavior, also works on Python 3.
        for key, value in item.items():
            if key != 'id':
                print(' ', key, '=', value)

    def print_help(self, print_options_help=True):
        '''Print the parser generated help along with burrow command help.'''
        if print_options_help:
            self.parser.print_help()
            print()
        for section in self.sections:
            print('%s commands:' % section['name'])
            for command in section['commands']:
                help_string = ''
                if section.get('filters', None):
                    help_string += ' [filters]'
                if command in self.attribute_commands:
                    help_string += ' [attributes]'
                for arg in section['args']:
                    help_string += ' <%s>' % arg
                print(' %s%s' % (command, help_string))
            print()
def main():
    '''Entry point: build a Shell instance and run it, propagating its
    exit status to the caller.'''
    shell = Shell()
    return shell.run()
# Script entry point guard: run the shell only when this module is
# executed directly, not when it is imported.
if __name__ == '__main__':
    main()
| |
"""Indices to the PHI5 Latin corpus."""
# Maps PHI5 file/author IDs (e.g. "LAT0474") to a display string for the
# author or work; some values list several aliases separated by commas
# (e.g. "Marcus Tullius Cicero, Cicero, Tully").
PHI5_INDEX = {
    "LAT0428": "Bellum Alexandrinum [Anonymous]",
    "LAT1276": "Decimus Iunius Iuvenalis, Juvenal",
    "LAT0448": "Gaius Iulius Caesar, Caesar",
    "LAT0490": "Publius Cominius",
    "LAT0514": "Egnatius",
    "LAT9254": "Titius, gram.",
    "LAT2305": "Caelius Aurelianus",
    "LAT0484": "Lucius Cincius",
    "LAT1345": "Silius Italicus",
    "LAT0474": "Marcus Tullius Cicero, Cicero, Tully",
    "LAT0614": "Q. Pompeius Q.f.A.n. Rufus",
    "LAT0510": "Publius Cornelius Dolabella",
    "LAT0413": "Gavius Bassus",
    "LAT9510": "Anonymi Grammatici",
    "LAT0452": "Gaius Iulius Caesar Strabo",
    "LAT0875": "Cn. Cornel. Lentulus Gaetulicus",
    "LAT0648": "Staberius Eros",
    "LAT0990": "Precatio Omnium Herbarum",
    "LAT1672": "Iulius Valerius",
    "LAT0451": "Sinnius Capito",
    "LAT0404": "Lucius Afranius",
    "LAT0143": "Trabea",
    "LAT0528": "Granius Flaccus",
    "LAT0455": "Gaius Calpurnius Piso",
    "LAT0635": "Publius Saturius",
    "LAT1212": "Apuleius Madaurensis",
    "LAT0624": "Quintus Claudius Quadrigarius",
    "LAT0592": "Novius, comoed.",
    "LAT0478": "Quintus Tullius Cicero",
    "LAT0472": "Gaius Valerius Catullus",
    "LAT0568": "Gnaeus Matius",
    "LAT0450": "Lucius Iulius Caesar",
    "LAT2003": "Caelius Apicius",
    "LAT0420": "Publius Aufidius Namusa",
    "LAT0564": "Manius Manilius",
    "LAT0806": "Gaius Ateius Capito",
    "LAT0923": "Aemilius Macer",
    "LAT0905": "Marcus Antistius Labeo",
    "LAT1363": "Aemilius Asper",
    "LAT0469": "Lucius Cassius Longinus",
    "LAT0630": "Sacra Argeorum",
    "LAT1291": "Marianus",
    "LAT0116": "Marcus Pacuvius",
    "LAT0306": "Carmen Devotionis",
    "LAT0987": "Publius Pomponius Secundus",
    "LAT0646": "Lucius Cornelius Sisenna",
    "LAT0552": "Quintus Lutatius Catulus",
    "LAT0730": "Tarquitius Priscus",
    "LAT0405": "Clodius Tuscus",
    "LAT9505": "Anonymi Comici et Tragici",
    "LAT0727": "Cornificius Longus",
    "LAT0082": "Decimus Iunius Silanus",
    "LAT2002": "Albinus, poet.",
    "LAT1221": "C. Iul. Caes. Augustus Octavianus",
    "LAT0466": "Aulus Cascellius",
    "LAT0972": "Petronius",
    "LAT0073": "Gaius Sempronius Gracchus",
    "LAT2301": "Q. Aurelius Memmius Symmachus",
    "LAT1100": "Calpurnius Flaccus",
    "LAT0104": "Gaius Memmius",
    "LAT0929": "Pomponius Mela",
    "LAT1020": "Publius Papinius Statius",
    "LAT1050": "Lucius Verginius Rufus",
    "LAT1294": "Marcus Valerius Martialis",
    "LAT0574": "Gaius Memmius L. f.",
    "LAT0402": "Valerius Aedituus",
    "LAT0530": "Aulus Hirtius",
    "LAT0500": "Lucius Licinius Crassus",
    "LAT0591": "Ninnius Crassus",
    "LAT0486": "Gaius Helvius Cinna",
    "LAT0400": "Lucius Accius",
    "LAT1206": "Lucius Ampelius",
    "LAT0058": "Q. Fabius Maximus Servilianus",
    "LAT0920": "Lucilius iunior",
    "LAT0007": "Atilius",
    "LAT1103": "Priapea",
    "LAT0944": "Imperator Nero",
    "LAT0815": "Bruttedius Niger",
    "LAT0494": "Commentarii Consulares",
    "LAT0409": "Quintus Cornificius",
    "LAT0496": "Commentarius Anquisit. Sergii",
    "LAT0812": "Gaius Caesius Bassus",
    "LAT0085": "Gaius Laelius Sapiens",
    "LAT0690": "Publius Vergilius Maro, Virgil, Vergil",
    "LAT0149": "Carmen Arvale",
    "LAT0887": "Grattius",
    "LAT0966": "Passienus Crispus",
    "LAT1321": "Sextus Pomponius",
    "LAT0140": "Gaius Titius",
    "LAT1263": "Hyginus, myth.",
    "LAT1380": "Philumenus medicus",
    "LAT0587": "Naevius, iunior",
    "LAT0533": "Gaius Iulius Hyginus",
    "LAT0596": "Numitorius",
    "LAT0821": "Bucolica Einsidlensia",
    "LAT0301": "Gnaeus Domitius Ahenobarbus",
    "LAT0456": "Gaius Licinius Macer Calvus",
    "LAT0031": "Cornelia, mater Gracchorum",
    "LAT0959": "Publius Ovidius Naso",
    "LAT1342": "Siculus Flaccus",
    "LAT0309": "Carmen Evocationis",
    "LAT0926": "Marcus Manilius, Manilius",
    "LAT0586": "Mummius",
    "LAT0444": "Marcus Caelius Rufus",
    "LAT0112": "Gnaeus Naevius",
    "LAT0682": "Lucius Varius Rufus",
    "LAT1377": "Fragmenta Bobiensia",
    "LAT0534": "Iuventius, comoed.",
    "LAT0588": "Cornelius Nepos",
    "LAT0425": "Publius Rutilius Lupus",
    "LAT0984": "Pompeius Trogus",
    "LAT0824": "Cn. Arulenus Caelius Sabinus",
    "LAT1236": "Sextus Pompeius Festus",
    "LAT1500": "Altercatio Hadr. et Epicteti",
    "LAT0302": "Marcus Antonius",
    "LAT0076": "Gaius Cassius Hemina",
    "LAT1035": "Gaius Valerius Flaccus",
    "LAT2150": "Zeno of Verona",
    "LAT0536": "Decimus Laberius",
    "LAT9221": "Paulus Quaestor",
    "LAT0863": "Dorcatius",
    "LAT0636": "Quintus Mucius Scaevola",
    "LAT0661": "Ticidas",
    "LAT0576": "M. Valerius Messalla Rufus",
    "LAT0067": "Favorinus",
    "LAT0538": "Laevius",
    "LAT1254": "Aulus Gellius",
    "LAT0556": "Gaius Licinius Macer",
    "LAT0640": "Marcus Aemilius Scaurus",
    "LAT1374": "Velius Longus",
    "LAT2331": "Scriptores Historiae Augustae",
    "LAT0103": "Gnaeus Marcius vates",
    "LAT1279": "Laelius Felix",
    "LAT0010": "Marcus Iunius Brutus [iur.]",
    "LAT0106": "Caecilius Metellus",
    "LAT0584": "Mimi Poetarum Incertorum",
    "LAT1044": "Velleius Paterculus",
    "LAT1056": "Vitruvius",
    "LAT0709": "Domitius Marsus",
    "LAT0134": "Publius Terentius Afer, Terence",
    "LAT0836": "Aulus Cornelius Celsus",
    "LAT0088": "M. Aemilius Lepidus Porcina",
    "LAT1348": "Gaius Suetonius Tranquillus",
    "LAT0013": "Caecilius Statius",
    "LAT0109": "Q. Caecilius Metellus Maced.",
    "LAT0684": "Marcus Terentius Varro, Varro",
    "LAT0638": "Q. Mucius Scaevola [pontifex]",
    "LAT0324": "Saserna",
    "LAT0692": "Appendix Vergiliana",
    "LAT0498": "Gaius Aurelius Cotta",
    "LAT0845": "L. Iunius Moderatus Columella",
    "LAT1047": "Veranius",
    "LAT0899": "Hyginus Astronomus",
    "LAT0878": "Gaius Asinius Gallus",
    "LAT0724": "Cloatius Verus",
    "LAT1038": "Valerius Maximus",
    "LAT0703": "Arbonius Silo",
    "LAT0721": "Antonius Panurgus",
    "LAT0418": "Titus Quinctius Atta",
    "LAT2300": "Aemilius Sura",
    "LAT0662": "Marcus Tullius Tiro",
    "LAT0540": "Tullius Laurea",
    "LAT0432": "Marcus Furius Bibaculus",
    "LAT1242": "Annius Florus",
    "LAT1512": "Pomponius Porphyrio",
    "LAT0416": "Lucius Ateius Praetextatus",
    "LAT0327": "L. Aelius Praeconinus Stilo",
    "LAT0932": "M. Valerius Messalla Corvinus",
    "LAT0097": "Gaius Lucilius",
    "LAT0866": "Fenestella",
    "LAT0458": "Publius Cannutius",
    "LAT0127": "P. Cornel. Scipio Afr. ma.",
    "LAT0034": "Gaius Scribonius Curio avus",
    "LAT0079": "Hostius",
    "LAT0996": "Marcus Valerius Probus",
    "LAT0430": "Bellum Hispaniense [Anonymous]",
    "LAT1011": "Scribonius Largus",
    "LAT0043": "Quintus Ennius",
    "LAT0546": "Gaius Licinius Mucianus",
    "LAT0674": "Valerius, comoed.",
    "LAT1053": "Vibius Crispus",
    "LAT2097": "Sextus Paconianus",
    "LAT0827": "Caesellius Vindex",
    "LAT1029": "Turnus",
    "LAT0061": "Fabius Pictor",
    "LAT0656": "Servius Sulpicius Rufus",
    "LAT0869": "Marcus Verrius Flaccus",
    "LAT1306": "Lucius Neratius Priscus",
    "LAT1005": "Rabirius",
    "LAT1235": "Didascaliae et Per. in Terentium",
    "LAT0975": "Phaedrus",
    "LAT0541": "Cn. Cornel. Lentulus Marcell.",
    "LAT1908": "Gallus Antipater",
    "LAT0694": "Volumnius",
    "LAT0582": "Q. Caecilius Metellus Numid.",
    "LAT0706": "Carmen de Bello Aegyptiaco",
    "LAT1518": "Terentianus Maurus",
    "LAT0650": "Sueius",
    "LAT0446": "Quintus Servilius Caepio",
    "LAT0419": "Lucius Orbilius Pupillus",
    "LAT0644": "Sextilius Ena",
    "LAT0100": "Luscius Lanuvinus",
    "LAT0600": "Gaius Oppius",
    "LAT0442": "Aulus Caecina",
    "LAT0423": "Lucius Herennius Balbus",
    "LAT0408": "Marcus Antonius triumvir",
    "LAT0854": "Cornificius Gallus",
    "LAT1336": "Scaevus Memor",
    "LAT0590": "Publius Nigidius Figulus",
    "LAT0628": "Publius Rutilius Rufus",
    "LAT1351": "Cornelius Tacitus",
    "LAT0028": "Lucius Coelius Antipater",
    "LAT0642": "Sevius Nicanor",
    "LAT0117": "Papinius, epigram.",
    "LAT0615": "Q. Pompeius Q.f.Q.n. Rufus",
    "LAT0914": "Titus Livius, Livy",
    "LAT0460": "Gaius Papirius Carbo Arvina",
    "LAT1014": "Lucius Annaeus Seneca senior",
    "LAT0025": "Marcus Porcius Cato M.f.M.n.",
    "LAT1604": "Iulius Atherianus",
    "LAT0128": "P. Cornel. Scipio Aem. Afr.",
    "LAT2302": "L. Aurel. Avianius Symmachus",
    "LAT0893": "Quintus Horatius Flaccus, Horace",
    "LAT1282": "Lentulus, mimus",
    "LAT2434": "Hilarius Arelatensis",
    "LAT0426": "Bellum Africum [Anonymous]",
    "LAT0016": "Lucius Calpurnius Piso Frugi",
    "LAT0981": "Gaius Asinius Pollio",
    "LAT0445": "Gaius vel Lucius Caepasius",
    "LAT0660": "Albius Tibullus",
    "LAT0631": "Gaius Sallustius Crispus",
    "LAT1318": "C. Plinius Caecilius Secundus, Pliny",
    "LAT0522": "Gaius Aelius Gallus",
    "LAT0488": "Servius Clodius",
    "LAT0678": "Quintus Valerius Soranus",
    "LAT1002": "Marcus Fabius Quintilianus",
    "LAT1339": "Septimius Serenus",
    "LAT1266": "Hyginus Gromaticus",
    "LAT0676": "Valerius Antias",
    "LAT1506": "Anonymi Fragmenta de Iure Fisci",
    "LAT0686": "P. Terentius Varro Atacinus",
    "LAT0064": "Gaius Fannius",
    "LAT0857": "Lucius Annaeus Cornutus",
    "LAT0518": "Aulus Furius Antias",
    "LAT0537": "Titus Labienus",
    "LAT0890": "Homerus Latinus",
    "LAT0550": "Titus Lucretius Carus",
    "LAT0406": "Publius Alfenus Varus",
    "LAT0881": "Claudius Caesar Germanicus",
    "LAT0620": "Sextus Propertius",
    "LAT0137": "Titinius",
    "LAT0512": "Marcus Duronius",
    "LAT0911": "Laus Pisonis",
    "LAT0625": "Lucius Quinctius",
    "LAT0005": "Aquilius, comoed.",
    "LAT0963": "Quintus Remmius Palaemon",
    "LAT0122": "Aulus Postumius Albinus",
    "LAT0473": "Q. Lutatius Catulus iunior",
    "LAT0809": "Aufidius Bassus",
    "LAT0527": "Gannius",
    "LAT0454": "Marcus Calidius",
    "LAT1251": "Gaius, iur., Gaius",
    "LAT1370": "Quintus Terentius Scaurus",
    "LAT0146": "Sextus Turpilius",
    "LAT0842": "Gaius Clodius Licinus",
    "LAT2335": "Anonymi de Differentiis [Fronto]",
    "LAT0532": "Quintus Hortensius Hortalus",
    "LAT0004": "Appius Claudius Caecus",
    "LAT0594": "Lucius Novius",
    "LAT0410": "Aprissius (?)",
    "LAT0526": "Gaius Servilius Glaucia",
    "LAT0800": "Albinovanus Pedo",
    "LAT0884": "Gracchus, trag.",
    "LAT0094": "Lucius Livius Andronicus",
    "LAT0652": "Lucius Cornelius Sulla",
    "LAT0618": "Lucius Pomponius Bononiensis",
    "LAT0412": "Gaius Aquilius Gallus",
    "LAT1248": "Marcus Cornelius Fronto",
    "LAT1234": "Didascaliae et Argum. in Plautum",
    "LAT1203": "Alfius Avitus",
    "LAT0622": "Publilius Syrus",
    "LAT0851": "Cornelius Severus",
    "LAT3211": "Argum. Aen. et Tetrast.",
    "LAT0606": "Lucius Marcius Philippus",
    "LAT1041": "Pseudo-Varro",
    "LAT0830": "Titus Calpurnius Siculus",
    "LAT0002": "Titus Annius Luscus",
    "LAT0664": "Gaius Trebatius Testa",
    "LAT0046": "Cornelius Epicadus",
    "LAT0680": "Gaius Valgius Rufus",
    "LAT0902": "Iulius Africanus",
    "LAT2000": "Ablabius",
    "LAT0993": "Precatio Terrae",
    "LAT9500": "Anonymi Epici et Lyrici",
    "LAT0470": "Marcus Porcius Cato Uticensis",
    "LAT0487": "Publius Clodius Pulcher",
    "LAT1023": "Sulpicia, Caleni uxor",
    "LAT0312": "Fabius Dossennus",
    "LAT0670": "Quintus Aelius Tubero",
    "LAT1285": "Lucius Volusius Maecianus",
    "LAT0022": "Marcus Porcius Cato, Cato",
    "LAT1245": "Sextus Iulius Frontinus",
    "LAT0935": "Iulius Modestus",
    "LAT1032": "Vagellius",
    "LAT0130": "P. Cornel. Scipio Nasica Ser.",
    "LAT1017": "Lucius Annaeus Seneca iunior",
    "LAT0414": "Lucius Arruntius",
    "LAT0330": "Volcacius Sedigitus",
    "LAT0634": "Santra",
    "LAT1227": "Balbus, grom.",
    "LAT1515": "Quintus Serenus (Sammonicus)",
    "LAT0401": "Aufustius",
    "LAT0908": "Attius Labeo",
    "LAT0938": "Iulius Montanus",
    "LAT0091": "Licinius Imbrex",
    "LAT1357": "Imp. Marcus Ulpius Traianus, Trajan",
    "LAT0037": "Gaius Scribonius Curio pater",
    "LAT1224": "Marcus Aurelius",
    "LAT0492": "Commentarii Augurum",
    "LAT0535": "Marcus Iuventius Laterensis",
    "LAT0978": "Gaius Plinius Secundus, Pliny",
    "LAT2468": "Aurelius Augustinus",
    "LAT1327": "Sabidius",
    "LAT0860": "Quintus Curtius Rufus",
    "LAT1260": "Hadrianus",
    "LAT1218": "Sentius Augurinus",
    "LAT0125": "Publius Mucius Scaevola",
    "LAT0119": "Titus Maccius Plautus",
    "LAT0502": "Aulus Cremutius Cordus",
    "LAT1297": "Marullus",
    "LAT0668": "Gnaeus Tremelius Scrofa",
    "LAT1229": "Flavius Caper",
    "LAT0515": "Sextus (vel Spurius) Ennius",
    "LAT0672": "Turranius Niger",
    "LAT0019": "Gaius Papirius Carbo",
    "LAT0917": "Marcus Annaeus Lucanus",
    "LAT9969": "Vita Iuvenalis",
    "LAT0516": "Gaius Erucius",
    "LAT1209": "Annianus",
    "LAT2123": "Publilius Optatianus Porfyrius",
    "LAT2456": "Parthenius Presbyter",
    "LAT2028": "Chalcidius",
    "LAT0303": "Aurelius Opillus",
    "LAT2349": "Maurus Servius Honoratus, Servius",
    "LAT1257": "Granius Licinianus",
    "LAT1000": "Pupius (?)",
    "LAT0070": "Gnaeus Gellius",
    "LAT2806": "Iustinianus, Justinian, Digest",
    "LAT0658": "Tabulae Censoriae",
    "LAT0027": "Lucius Cincius Alimentus",
    "LAT0524": "Gaius Cornelius Gallus, Gallus",
    "LAT0560": "Helvius Mancia",
    "LAT0558": "Gaius Cilnius Maecenas",
    "LAT0562": "Manilius, poet.",
    "LAT0436": "Marcus Iunius Brutus [tyr.]",
    "LAT0803": "Quintus Asconius Pedianus",
    "LAT0118": "L. Aemilius L.f.M.n. Paulus",
    "LAT0321": "Porcius Licinus",
    "LAT0616": "Pompilius",
    "LAT0300": "Sempronius Asellio",
    "LAT0315": "Marcus Iunius Gracchanus",
    "LAT0969": "Aulus Persius Flaccus",
}
PHI5_WORKS_INDEX = {
"LAT0528": {"works": ["001"], "name": "Granius Flaccus"},
"LAT0650": {"works": ["001"], "name": "Sueius"},
"LAT1380": {"works": ["001"], "name": "Philumenus medicus"},
"LAT2002": {"works": ["001", "002"], "name": "Albinus, poet."},
"LAT0845": {"works": ["001", "002"], "name": "L. Iunius Moderatus Columella"},
"LAT1245": {
"works": ["001", "002", "003", "004", "005", "006"],
"name": "Sextus Iulius Frontinus",
},
"LAT0568": {"works": ["001"], "name": "Gnaeus Matius"},
"LAT0532": {"works": ["001", "002"], "name": "Quintus Hortensius Hortalus"},
"LAT0672": {"works": ["001"], "name": "Turranius Niger"},
"LAT0117": {"works": ["001"], "name": "Papinius, epigram."},
"LAT0824": {"works": ["001"], "name": "Cn. Arulenus Caelius Sabinus"},
"LAT1363": {"works": ["001", "002"], "name": "Aemilius Asper"},
"LAT0574": {"works": ["001", "002"], "name": "Gaius Memmius L. f."},
"LAT0842": {"works": ["001"], "name": "Gaius Clodius Licinus"},
"LAT0670": {"works": ["001", "002"], "name": "Quintus Aelius Tubero"},
"LAT0466": {"works": ["001"], "name": "Aulus Cascellius"},
"LAT2331": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
"020",
"021",
"022",
"023",
"024",
"025",
"026",
"027",
"028",
"029",
"030",
],
"name": "Scriptores Historiae Augustae",
},
"LAT1263": {"works": ["001"], "name": "Hyginus, myth."},
"LAT1260": {"works": ["001", "002"], "name": "Hadrianus"},
"LAT0442": {"works": ["002"], "name": "Aulus Caecina"},
"LAT0674": {"works": ["001"], "name": "Valerius, comoed."},
"LAT0149": {"works": ["001"], "name": "Carmen Arvale"},
"LAT2806": {"works": ["002"], "name": "Iustinianus, Justinian, Digest"},
"LAT0588": {"works": ["001", "002"], "name": "Cornelius Nepos"},
"LAT0515": {"works": ["001"], "name": "Sextus (vel Spurius) Ennius"},
"LAT0127": {"works": ["001"], "name": "P. Cornel. Scipio Afr. ma."},
"LAT1306": {"works": ["002"], "name": "Lucius Neratius Priscus"},
"LAT0470": {"works": ["002"], "name": "Marcus Porcius Cato Uticensis"},
"LAT0703": {"works": ["001"], "name": "Arbonius Silo"},
"LAT2305": {"works": ["001"], "name": "Caelius Aurelianus"},
"LAT0445": {"works": ["001"], "name": "Gaius vel Lucius Caepasius"},
"LAT0899": {"works": ["001"], "name": "Hyginus Astronomus"},
"LAT0806": {"works": ["001"], "name": "Gaius Ateius Capito"},
"LAT1294": {"works": ["001", "002"], "name": "Marcus Valerius Martialis"},
"LAT0301": {"works": ["001"], "name": "Gnaeus Domitius Ahenobarbus"},
"LAT0315": {"works": ["001"], "name": "Marcus Iunius Gracchanus"},
"LAT0088": {"works": ["001"], "name": "M. Aemilius Lepidus Porcina"},
"LAT0662": {"works": ["001"], "name": "Marcus Tullius Tiro"},
"LAT1285": {"works": ["001"], "name": "Lucius Volusius Maecianus"},
"LAT0893": {
"works": ["001", "002", "003", "004", "005", "006"],
"name": "Quintus Horatius Flaccus, Horace",
},
"LAT0306": {"works": ["001"], "name": "Carmen Devotionis"},
"LAT0524": {"works": ["001", "002"], "name": "Gaius Cornelius Gallus, Gallus"},
"LAT0484": {"works": ["001", "002"], "name": "Lucius Cincius"},
"LAT1206": {"works": ["001"], "name": "Lucius Ampelius"},
"LAT0975": {"works": ["001", "002"], "name": "Phaedrus"},
"LAT0587": {"works": ["001"], "name": "Naevius, iunior"},
"LAT0405": {"works": ["001"], "name": "Clodius Tuscus"},
"LAT1044": {"works": ["001"], "name": "Velleius Paterculus"},
"LAT0130": {"works": ["001"], "name": "P. Cornel. Scipio Nasica Ser."},
"LAT0996": {"works": ["001", "002", "003"], "name": "Marcus Valerius Probus"},
"LAT1017": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
],
"name": "Lucius Annaeus Seneca iunior",
},
"LAT0104": {"works": ["001"], "name": "Gaius Memmius"},
"LAT0516": {"works": ["001"], "name": "Gaius Erucius"},
"LAT0028": {"works": ["001"], "name": "Lucius Coelius Antipater"},
"LAT1604": {"works": ["001"], "name": "Iulius Atherianus"},
"LAT0109": {"works": ["001"], "name": "Q. Caecilius Metellus Maced."},
"LAT0458": {"works": ["001"], "name": "Publius Cannutius"},
"LAT0451": {"works": ["001"], "name": "Sinnius Capito"},
"LAT0512": {"works": ["001"], "name": "Marcus Duronius"},
"LAT1266": {
"works": ["001", "002", "003", "004", "005"],
"name": "Hyginus Gromaticus",
},
"LAT1291": {"works": ["001"], "name": "Marianus"},
"LAT0309": {"works": ["001"], "name": "Carmen Evocationis"},
"LAT0959": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
],
"name": "Publius Ovidius Naso",
},
"LAT0984": {"works": ["001", "002"], "name": "Pompeius Trogus"},
"LAT0416": {"works": ["001"], "name": "Lucius Ateius Praetextatus"},
"LAT9510": {"works": ["001"], "name": "Anonymi Grammatici"},
"LAT0730": {"works": ["001"], "name": "Tarquitius Priscus"},
"LAT0592": {"works": ["001"], "name": "Novius, comoed."},
"LAT0076": {"works": ["001"], "name": "Gaius Cassius Hemina"},
"LAT0631": {
"works": ["001", "002", "003", "004", "005", "006", "007", "008"],
"name": "Gaius Sallustius Crispus",
},
"LAT0146": {"works": ["001"], "name": "Sextus Turpilius"},
"LAT0537": {"works": ["001"], "name": "Titus Labienus"},
"LAT0969": {"works": ["001"], "name": "Aulus Persius Flaccus"},
"LAT0636": {"works": ["001"], "name": "Quintus Mucius Scaevola"},
"LAT0902": {"works": ["001"], "name": "Iulius Africanus"},
"LAT2468": {"works": ["001"], "name": "Aurelius Augustinus"},
"LAT1011": {"works": ["001"], "name": "Scribonius Largus"},
"LAT0884": {"works": ["001"], "name": "Gracchus, trag."},
"LAT0067": {"works": ["001"], "name": "Favorinus"},
"LAT0920": {"works": ["001"], "name": "Lucilius iunior"},
"LAT0444": {"works": ["002"], "name": "Marcus Caelius Rufus"},
"LAT0324": {"works": ["001"], "name": "Saserna"},
"LAT0596": {"works": ["001"], "name": "Numitorius"},
"LAT0327": {"works": ["001"], "name": "L. Aelius Praeconinus Stilo"},
"LAT0546": {"works": ["001"], "name": "Gaius Licinius Mucianus"},
"LAT0881": {"works": ["001", "002", "003"], "name": "Claudius Caesar Germanicus"},
"LAT9505": {
"works": ["001", "002", "003", "004"],
"name": "Anonymi Comici et Tragici",
},
"LAT0514": {"works": ["001"], "name": "Egnatius"},
"LAT0981": {"works": ["001", "003", "004", "005"], "name": "Gaius Asinius Pollio"},
"LAT0409": {"works": ["001"], "name": "Quintus Cornificius"},
"LAT0432": {"works": ["001"], "name": "Marcus Furius Bibaculus"},
"LAT0418": {"works": ["001", "002"], "name": "Titus Quinctius Atta"},
"LAT0300": {"works": ["001"], "name": "Sempronius Asellio"},
"LAT0496": {"works": ["001"], "name": "Commentarius Anquisit. Sergii"},
"LAT0106": {"works": ["001"], "name": "Caecilius Metellus"},
"LAT0917": {"works": ["001", "002"], "name": "Marcus Annaeus Lucanus"},
"LAT0656": {"works": ["002", "003"], "name": "Servius Sulpicius Rufus"},
"LAT0502": {"works": ["001"], "name": "Aulus Cremutius Cordus"},
"LAT1345": {"works": ["001"], "name": "Silius Italicus"},
"LAT0869": {"works": ["001", "002"], "name": "Marcus Verrius Flaccus"},
"LAT0535": {"works": ["001"], "name": "Marcus Iuventius Laterensis"},
"LAT0682": {"works": ["001", "002"], "name": "Lucius Varius Rufus"},
"LAT1056": {"works": ["001"], "name": "Vitruvius"},
"LAT1005": {"works": ["001"], "name": "Rabirius"},
"LAT0070": {"works": ["001"], "name": "Gnaeus Gellius"},
"LAT1370": {
"works": ["001", "002", "003", "004"],
"name": "Quintus Terentius Scaurus",
},
"LAT1374": {"works": ["001"], "name": "Velius Longus"},
"LAT2150": {"works": ["001"], "name": "Zeno of Verona"},
"LAT0727": {"works": ["001"], "name": "Cornificius Longus"},
"LAT0530": {"works": ["001", "002"], "name": "Aulus Hirtius"},
"LAT0929": {"works": ["001"], "name": "Pomponius Mela"},
"LAT0082": {"works": ["001"], "name": "Decimus Iunius Silanus"},
"LAT0634": {"works": ["001", "002"], "name": "Santra"},
"LAT0863": {"works": ["001"], "name": "Dorcatius"},
"LAT0302": {"works": ["001"], "name": "Marcus Antonius"},
"LAT0800": {"works": ["001"], "name": "Albinovanus Pedo"},
"LAT0116": {"works": ["001", "002"], "name": "Marcus Pacuvius"},
"LAT0590": {"works": ["001"], "name": "Publius Nigidius Figulus"},
"LAT1041": {"works": ["001"], "name": "Pseudo-Varro"},
"LAT1023": {"works": ["001", "002"], "name": "Sulpicia, Caleni uxor"},
"LAT0560": {"works": ["001"], "name": "Helvius Mancia"},
"LAT0664": {"works": ["001"], "name": "Gaius Trebatius Testa"},
"LAT0013": {"works": ["001"], "name": "Caecilius Statius"},
"LAT0450": {"works": ["001"], "name": "Lucius Iulius Caesar"},
"LAT0706": {"works": ["001"], "name": "Carmen de Bello Aegyptiaco"},
"LAT1035": {"works": ["001"], "name": "Gaius Valerius Flaccus"},
"LAT0140": {"works": ["001"], "name": "Gaius Titius"},
"LAT2300": {"works": ["001"], "name": "Aemilius Sura"},
"LAT0404": {"works": ["001"], "name": "Lucius Afranius"},
"LAT1029": {"works": ["001"], "name": "Turnus"},
"LAT0676": {"works": ["001"], "name": "Valerius Antias"},
"LAT0866": {"works": ["001"], "name": "Fenestella"},
"LAT1053": {"works": ["001"], "name": "Vibius Crispus"},
"LAT0472": {"works": ["001", "002"], "name": "Gaius Valerius Catullus"},
"LAT0857": {"works": ["001"], "name": "Lucius Annaeus Cornutus"},
"LAT1251": {"works": ["001", "002", "004"], "name": "Gaius, iur., Gaius"},
"LAT0630": {"works": ["001"], "name": "Sacra Argeorum"},
"LAT0536": {"works": ["001"], "name": "Decimus Laberius"},
"LAT0638": {"works": ["002"], "name": "Q. Mucius Scaevola [pontifex]"},
"LAT0914": {"works": ["001", "002", "003", "004"], "name": "Titus Livius, Livy"},
"LAT0494": {"works": ["001"], "name": "Commentarii Consulares"},
"LAT0812": {
"works": ["001", "002", "003", "004", "005", "006", "007"],
"name": "Gaius Caesius Bassus",
},
"LAT0990": {"works": ["001"], "name": "Precatio Omnium Herbarum"},
"LAT0401": {"works": ["001"], "name": "Aufustius"},
"LAT0091": {"works": ["001"], "name": "Licinius Imbrex"},
"LAT0660": {"works": ["001", "002"], "name": "Albius Tibullus"},
"LAT0085": {"works": ["001"], "name": "Gaius Laelius Sapiens"},
"LAT0420": {"works": ["001"], "name": "Publius Aufidius Namusa"},
"LAT0875": {"works": ["001"], "name": "Cn. Cornel. Lentulus Gaetulicus"},
"LAT0550": {"works": ["001", "002", "003"], "name": "Titus Lucretius Carus"},
"LAT0425": {"works": ["001"], "name": "Publius Rutilius Lupus"},
"LAT1038": {"works": ["001"], "name": "Valerius Maximus"},
"LAT0406": {"works": ["002"], "name": "Publius Alfenus Varus"},
"LAT1276": {"works": ["001"], "name": "Decimus Iunius Iuvenalis, Juvenal"},
"LAT0541": {"works": ["001"], "name": "Cn. Cornel. Lentulus Marcell."},
"LAT0025": {"works": ["001"], "name": "Marcus Porcius Cato M.f.M.n."},
"LAT0498": {"works": ["001"], "name": "Gaius Aurelius Cotta"},
"LAT0034": {"works": ["001"], "name": "Gaius Scribonius Curio avus"},
"LAT0094": {"works": ["001", "002", "003"], "name": "Lucius Livius Andronicus"},
"LAT0721": {"works": ["001"], "name": "Antonius Panurgus"},
"LAT0500": {"works": ["001"], "name": "Lucius Licinius Crassus"},
"LAT0648": {"works": ["001"], "name": "Staberius Eros"},
"LAT1209": {"works": ["001"], "name": "Annianus"},
"LAT0487": {"works": ["001"], "name": "Publius Clodius Pulcher"},
"LAT0686": {"works": ["001"], "name": "P. Terentius Varro Atacinus"},
"LAT1377": {
"works": ["001", "002", "003", "004", "005", "006", "007", "008"],
"name": "Fragmenta Bobiensia",
},
"LAT0652": {"works": ["001"], "name": "Lucius Cornelius Sulla"},
"LAT0510": {"works": ["002"], "name": "Publius Cornelius Dolabella"},
"LAT0661": {"works": ["001"], "name": "Ticidas"},
"LAT0488": {"works": ["001"], "name": "Servius Clodius"},
"LAT0624": {"works": ["001"], "name": "Quintus Claudius Quadrigarius"},
"LAT2003": {"works": ["001", "002", "003"], "name": "Caelius Apicius"},
"LAT1357": {"works": ["002"], "name": "Imp. Marcus Ulpius Traianus, Trajan"},
"LAT1235": {
"works": ["001", "002", "003", "004", "005", "006"],
"name": "Didascaliae et Per. in Terentium",
},
"LAT0724": {"works": ["001"], "name": "Cloatius Verus"},
"LAT0938": {"works": ["001"], "name": "Iulius Montanus"},
"LAT1500": {"works": ["001"], "name": "Altercatio Hadr. et Epicteti"},
"LAT0582": {"works": ["001"], "name": "Q. Caecilius Metellus Numid."},
"LAT1327": {"works": ["001"], "name": "Sabidius"},
"LAT0972": {"works": ["001", "002"], "name": "Petronius"},
"LAT2335": {"works": ["001"], "name": "Anonymi de Differentiis [Fronto]"},
"LAT2349": {
"works": ["001", "002", "003", "004", "005", "006", "007"],
"name": "Maurus Servius Honoratus, Servius",
},
"LAT1515": {"works": ["001", "002"], "name": "Quintus Serenus (Sammonicus)"},
"LAT0022": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
],
"name": "Marcus Porcius Cato, Cato",
},
"LAT0412": {"works": ["001"], "name": "Gaius Aquilius Gallus"},
"LAT2028": {"works": ["001"], "name": "Chalcidius"},
"LAT0993": {"works": ["001"], "name": "Precatio Terrae"},
"LAT0678": {"works": ["001"], "name": "Quintus Valerius Soranus"},
"LAT1236": {"works": ["001"], "name": "Sextus Pompeius Festus"},
"LAT0851": {"works": ["001", "002"], "name": "Cornelius Severus"},
"LAT0908": {"works": ["001"], "name": "Attius Labeo"},
"LAT0635": {"works": ["001"], "name": "Publius Saturius"},
"LAT0118": {"works": ["001"], "name": "L. Aemilius L.f.M.n. Paulus"},
"LAT0518": {"works": ["001"], "name": "Aulus Furius Antias"},
"LAT0423": {"works": ["001"], "name": "Lucius Herennius Balbus"},
"LAT0436": {"works": ["002"], "name": "Marcus Iunius Brutus [tyr.]"},
"LAT1351": {
"works": ["001", "002", "003", "004", "005"],
"name": "Cornelius Tacitus",
},
"LAT9254": {"works": ["001"], "name": "Titius, gram."},
"LAT0644": {"works": ["001"], "name": "Sextilius Ena"},
"LAT0978": {"works": ["001", "002"], "name": "Gaius Plinius Secundus, Pliny"},
"LAT0430": {"works": ["001"], "name": "Bellum Hispaniense [Anonymous]"},
"LAT0526": {"works": ["001"], "name": "Gaius Servilius Glaucia"},
"LAT1339": {"works": ["001"], "name": "Septimius Serenus"},
"LAT0926": {"works": ["001"], "name": "Marcus Manilius, Manilius"},
"LAT0815": {"works": ["001"], "name": "Bruttedius Niger"},
"LAT2456": {"works": ["001"], "name": "Parthenius Presbyter"},
"LAT0600": {"works": ["001", "002"], "name": "Gaius Oppius"},
"LAT1218": {"works": ["001"], "name": "Sentius Augurinus"},
"LAT9500": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
"020",
"021",
"022",
"023",
"024",
"025",
"026",
"027",
"028",
"029",
"030",
"031",
"032",
"033",
"034",
"035",
"036",
"037",
"038",
"039",
"040",
"041",
],
"name": "Anonymi Epici et Lyrici",
},
"LAT1224": {"works": ["001"], "name": "Marcus Aurelius"},
"LAT0122": {"works": ["001"], "name": "Aulus Postumius Albinus"},
"LAT1672": {"works": ["001"], "name": "Iulius Valerius"},
"LAT0534": {"works": ["001"], "name": "Iuventius, comoed."},
"LAT0097": {"works": ["001"], "name": "Gaius Lucilius"},
"LAT0408": {"works": ["002"], "name": "Marcus Antonius triumvir"},
"LAT1100": {"works": ["001"], "name": "Calpurnius Flaccus"},
"LAT1234": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
],
"name": "Didascaliae et Argum. in Plautum",
},
"LAT1002": {"works": ["001", "002", "003"], "name": "Marcus Fabius Quintilianus"},
"LAT0016": {"works": ["001"], "name": "Lucius Calpurnius Piso Frugi"},
"LAT0944": {"works": ["001"], "name": "Imperator Nero"},
"LAT0400": {"works": ["001", "002", "003"], "name": "Lucius Accius"},
"LAT0046": {"works": ["001"], "name": "Cornelius Epicadus"},
"LAT0486": {"works": ["001"], "name": "Gaius Helvius Cinna"},
"LAT2301": {"works": ["001"], "name": "Q. Aurelius Memmius Symmachus"},
"LAT0854": {"works": ["001"], "name": "Cornificius Gallus"},
"LAT1221": {
"works": ["001", "002", "003", "004", "005", "006", "007", "008"],
"name": "C. Iul. Caes. Augustus Octavianus",
},
"LAT0414": {"works": ["001"], "name": "Lucius Arruntius"},
"LAT0538": {"works": ["001", "002"], "name": "Laevius"},
"LAT0533": {"works": ["001", "002"], "name": "Gaius Iulius Hyginus"},
"LAT0061": {"works": ["001", "002"], "name": "Fabius Pictor"},
"LAT3211": {
"works": ["001", "002", "003", "004"],
"name": "Argum. Aen. et Tetrast.",
},
"LAT0658": {"works": ["001"], "name": "Tabulae Censoriae"},
"LAT1242": {"works": ["001", "002", "003", "004", "005"], "name": "Annius Florus"},
"LAT0456": {"works": ["001", "002"], "name": "Gaius Licinius Macer Calvus"},
"LAT0402": {"works": ["001"], "name": "Valerius Aedituus"},
"LAT0031": {"works": ["001"], "name": "Cornelia, mater Gracchorum"},
"LAT0490": {"works": ["001"], "name": "Publius Cominius"},
"LAT0455": {"works": ["001"], "name": "Gaius Calpurnius Piso"},
"LAT0019": {"works": ["001"], "name": "Gaius Papirius Carbo"},
"LAT1908": {"works": ["001"], "name": "Gallus Antipater"},
"LAT0562": {"works": ["001"], "name": "Manilius, poet."},
"LAT0836": {"works": ["001", "002", "003"], "name": "Aulus Cornelius Celsus"},
"LAT0690": {
"works": ["001", "002", "003"],
"name": "Publius Vergilius Maro, Virgil, Vergil",
},
"LAT0620": {"works": ["001"], "name": "Sextus Propertius"},
"LAT0473": {"works": ["001"], "name": "Q. Lutatius Catulus iunior"},
"LAT0932": {"works": ["001", "002"], "name": "M. Valerius Messalla Corvinus"},
"LAT0576": {"works": ["001", "002"], "name": "M. Valerius Messalla Rufus"},
"LAT0935": {"works": ["001"], "name": "Iulius Modestus"},
"LAT0058": {"works": ["001"], "name": "Q. Fabius Maximus Servilianus"},
"LAT0680": {"works": ["001"], "name": "Gaius Valgius Rufus"},
"LAT0556": {"works": ["001", "002"], "name": "Gaius Licinius Macer"},
"LAT0628": {"works": ["001"], "name": "Publius Rutilius Rufus"},
"LAT0615": {"works": ["001"], "name": "Q. Pompeius Q.f.Q.n. Rufus"},
"LAT1032": {"works": ["001"], "name": "Vagellius"},
"LAT0622": {"works": ["001", "002"], "name": "Publilius Syrus"},
"LAT1050": {"works": ["001"], "name": "Lucius Verginius Rufus"},
"LAT0004": {"works": ["001"], "name": "Appius Claudius Caecus"},
"LAT0803": {
"works": ["001", "002", "003", "004", "005"],
"name": "Quintus Asconius Pedianus",
},
"LAT0552": {"works": ["001", "002"], "name": "Quintus Lutatius Catulus"},
"LAT0005": {"works": ["001"], "name": "Aquilius, comoed."},
"LAT1321": {"works": ["002"], "name": "Sextus Pomponius"},
"LAT0112": {
"works": ["001", "002", "003", "004", "005", "006", "007"],
"name": "Gnaeus Naevius",
},
"LAT1297": {"works": ["001"], "name": "Marullus"},
"LAT2302": {"works": ["001"], "name": "L. Aurel. Avianius Symmachus"},
"LAT0492": {"works": ["001"], "name": "Commentarii Augurum"},
"LAT0413": {"works": ["001", "002"], "name": "Gavius Bassus"},
"LAT0586": {"works": ["001"], "name": "Mummius"},
"LAT0312": {"works": ["001"], "name": "Fabius Dossennus"},
"LAT0522": {"works": ["001", "002"], "name": "Gaius Aelius Gallus"},
"LAT0137": {"works": ["001"], "name": "Titinius"},
"LAT0966": {"works": ["001"], "name": "Passienus Crispus"},
"LAT0007": {"works": ["001"], "name": "Atilius"},
"LAT0419": {"works": ["001"], "name": "Lucius Orbilius Pupillus"},
"LAT0119": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
"020",
"021",
"022",
],
"name": "Titus Maccius Plautus",
},
"LAT0540": {"works": ["001"], "name": "Tullius Laurea"},
"LAT0594": {"works": ["001"], "name": "Lucius Novius"},
"LAT1047": {"works": ["001"], "name": "Veranius"},
"LAT0410": {"works": ["001"], "name": "Aprissius (?)"},
"LAT0010": {"works": ["001"], "name": "Marcus Iunius Brutus [iur.]"},
"LAT0428": {"works": ["001"], "name": "Bellum Alexandrinum [Anonymous]"},
"LAT0128": {"works": ["001"], "name": "P. Cornel. Scipio Aem. Afr."},
"LAT0890": {"works": ["001"], "name": "Homerus Latinus"},
"LAT0446": {"works": ["001"], "name": "Quintus Servilius Caepio"},
"LAT0887": {"works": ["001"], "name": "Grattius"},
"LAT0905": {"works": ["002"], "name": "Marcus Antistius Labeo"},
"LAT1257": {"works": ["001"], "name": "Granius Licinianus"},
"LAT0079": {"works": ["001"], "name": "Hostius"},
"LAT1020": {
"works": ["001", "002", "003", "004"],
"name": "Publius Papinius Statius",
},
"LAT1348": {
"works": ["001", "002", "003", "004", "005", "006"],
"name": "Gaius Suetonius Tranquillus",
},
"LAT0027": {"works": ["001"], "name": "Lucius Cincius Alimentus"},
"LAT0668": {"works": ["001"], "name": "Gnaeus Tremelius Scrofa"},
"LAT0103": {"works": ["001"], "name": "Gnaeus Marcius vates"},
"LAT1203": {"works": ["001"], "name": "Alfius Avitus"},
"LAT0330": {"works": ["001"], "name": "Volcacius Sedigitus"},
"LAT9969": {"works": ["001"], "name": "Vita Iuvenalis"},
"LAT0642": {"works": ["001"], "name": "Sevius Nicanor"},
"LAT1014": {
"works": ["001", "002", "003", "004"],
"name": "Lucius Annaeus Seneca senior",
},
"LAT0591": {"works": ["001"], "name": "Ninnius Crassus"},
"LAT2123": {"works": ["003"], "name": "Publilius Optatianus Porfyrius"},
"LAT1212": {
"works": ["001", "002", "003", "004", "005", "006", "007", "008", "009", "010"],
"name": "Apuleius Madaurensis",
},
"LAT0584": {"works": ["001", "002"], "name": "Mimi Poetarum Incertorum"},
"LAT0923": {"works": ["001", "002"], "name": "Aemilius Macer"},
"LAT1518": {"works": ["001"], "name": "Terentianus Maurus"},
"LAT0452": {"works": ["001", "002"], "name": "Gaius Iulius Caesar Strabo"},
"LAT0860": {"works": ["001"], "name": "Quintus Curtius Rufus"},
"LAT0625": {"works": ["001"], "name": "Lucius Quinctius"},
"LAT1336": {"works": ["001"], "name": "Scaevus Memor"},
"LAT0426": {"works": ["001"], "name": "Bellum Africum [Anonymous]"},
"LAT0821": {"works": ["001"], "name": "Bucolica Einsidlensia"},
"LAT0143": {"works": ["001"], "name": "Trabea"},
"LAT0100": {"works": ["001"], "name": "Luscius Lanuvinus"},
"LAT2097": {"works": ["001"], "name": "Sextus Paconianus"},
"LAT9221": {"works": ["001"], "name": "Paulus Quaestor"},
"LAT0606": {"works": ["001"], "name": "Lucius Marcius Philippus"},
"LAT0809": {"works": ["001"], "name": "Aufidius Bassus"},
"LAT0987": {"works": ["001", "002"], "name": "Publius Pomponius Secundus"},
"LAT0527": {"works": ["001"], "name": "Gannius"},
"LAT0709": {"works": ["001", "002"], "name": "Domitius Marsus"},
"LAT1103": {"works": ["001"], "name": "Priapea"},
"LAT1248": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
],
"name": "Marcus Cornelius Fronto",
},
"LAT0614": {"works": ["001"], "name": "Q. Pompeius Q.f.A.n. Rufus"},
"LAT0478": {"works": ["002", "003"], "name": "Quintus Tullius Cicero"},
"LAT0125": {"works": ["001"], "name": "Publius Mucius Scaevola"},
"LAT0911": {"works": ["001"], "name": "Laus Pisonis"},
"LAT0616": {"works": ["001", "002"], "name": "Pompilius"},
"LAT0073": {"works": ["001"], "name": "Gaius Sempronius Gracchus"},
"LAT0618": {"works": ["001"], "name": "Lucius Pomponius Bononiensis"},
"LAT1279": {"works": ["001"], "name": "Laelius Felix"},
"LAT0640": {"works": ["001", "002"], "name": "Marcus Aemilius Scaurus"},
"LAT1512": {
"works": ["001", "002", "003", "004", "005", "006", "007"],
"name": "Pomponius Porphyrio",
},
"LAT0037": {"works": ["001"], "name": "Gaius Scribonius Curio pater"},
"LAT1000": {"works": ["001"], "name": "Pupius (?)"},
"LAT1282": {"works": ["001"], "name": "Lentulus, mimus"},
"LAT0002": {"works": ["001"], "name": "Titus Annius Luscus"},
"LAT0830": {"works": ["001"], "name": "Titus Calpurnius Siculus"},
"LAT2434": {"works": ["001"], "name": "Hilarius Arelatensis"},
"LAT1318": {
"works": ["001", "002", "003"],
"name": "C. Plinius Caecilius Secundus, Pliny",
},
"LAT1342": {"works": ["001"], "name": "Siculus Flaccus"},
"LAT0694": {"works": ["001"], "name": "Volumnius"},
"LAT0454": {"works": ["001"], "name": "Marcus Calidius"},
"LAT1227": {"works": ["001"], "name": "Balbus, grom."},
"LAT0134": {
"works": ["001", "002", "003", "004", "005", "006"],
"name": "Publius Terentius Afer, Terence",
},
"LAT0564": {"works": ["001"], "name": "Manius Manilius"},
"LAT0646": {"works": ["001", "002"], "name": "Lucius Cornelius Sisenna"},
"LAT0303": {"works": ["001"], "name": "Aurelius Opillus"},
"LAT0474": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
"020",
"021",
"022",
"023",
"024",
"025",
"026",
"027",
"028",
"029",
"030",
"031",
"032",
"033",
"034",
"035",
"036",
"037",
"038",
"039",
"040",
"041",
"042",
"043",
"044",
"045",
"046",
"047",
"048",
"049",
"050",
"051",
"052",
"053",
"054",
"055",
"056",
"057",
"058",
"059",
"060",
"061",
"062",
"063",
"064",
"065",
"066",
"067",
"068",
"069",
"070",
"071",
"072",
"073",
"074",
"075",
],
"name": "Marcus Tullius Cicero, Cicero, Tully",
},
"LAT0460": {"works": ["001"], "name": "Gaius Papirius Carbo Arvina"},
"LAT1506": {"works": ["001"], "name": "Anonymi Fragmenta de Iure Fisci"},
"LAT0827": {"works": ["001"], "name": "Caesellius Vindex"},
"LAT0963": {"works": ["001", "002"], "name": "Quintus Remmius Palaemon"},
"LAT1254": {"works": ["001"], "name": "Aulus Gellius"},
"LAT0448": {
"works": ["001", "002", "003", "004", "005", "006", "007", "008"],
"name": "Gaius Iulius Caesar, Caesar",
},
"LAT0878": {"works": ["001", "002"], "name": "Gaius Asinius Gallus"},
"LAT0043": {
"works": ["001", "002", "003", "004", "005", "006", "007"],
"name": "Quintus Ennius",
},
"LAT0064": {"works": ["001", "002"], "name": "Gaius Fannius"},
"LAT1229": {"works": ["001", "002"], "name": "Flavius Caper"},
"LAT0469": {"works": ["001"], "name": "Lucius Cassius Longinus"},
"LAT0692": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
],
"name": "Appendix Vergiliana",
},
"LAT0684": {
"works": [
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
],
"name": "Marcus Terentius Varro, Varro",
},
"LAT0558": {"works": ["001", "002"], "name": "Gaius Cilnius Maecenas"},
"LAT2000": {"works": ["001"], "name": "Ablabius"},
"LAT0321": {"works": ["001"], "name": "Porcius Licinus"},
}
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
from collections import Counter
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import validate_email_address, strip_html, cstr, time_diff_in_seconds
from frappe.core.doctype.communication.email import validate_email, notify, _notify
from frappe.core.utils import get_parent_doc
from frappe.utils.bot import BotReply
from frappe.utils import parse_addr
from frappe.core.doctype.comment.comment import update_comment_in_doc
from email.utils import parseaddr
from six.moves.urllib.parse import unquote
from frappe.utils.user import is_system_user
from frappe.contacts.doctype.contact.contact import get_contact_name
from frappe.automation.doctype.assignment_rule.assignment_rule import apply as apply_assignment_rule
# Module flag read by frappe (not in this file): presumably excludes
# Communication from the "Linked With" report — TODO confirm against frappe core.
exclude_from_linked_with = True
class Communication(Document):
	"""Communication represents an external communication like Email.

	NOTE(review): this docstring was originally placed *after* the
	``no_feed_on_delete`` assignment, so Python never assigned it to
	``Communication.__doc__``. It has been moved to the first statement of
	the class body so it is a real docstring.
	"""

	# Deleting a Communication should not generate a feed entry.
	no_feed_on_delete = True

	def onload(self):
		"""create email flag queue"""
		if self.communication_type == "Communication" and self.communication_medium == "Email" \
			and self.sent_or_received == "Received" and self.uid and self.uid != -1:

			# only enqueue a "Read" flag once per communication
			email_flag_queue = frappe.db.get_value("Email Flag Queue", {
				"communication": self.name,
				"is_completed": 0})
			if email_flag_queue:
				return

			frappe.get_doc({
				"doctype": "Email Flag Queue",
				"action": "Read",
				"communication": self.name,
				"uid": self.uid,
				"email_account": self.email_account
			}).insert(ignore_permissions=True)
			frappe.db.commit()

	def validate(self):
		"""Fill defaults, set status and build timeline links before save."""
		self.validate_reference()

		if not self.user:
			self.user = frappe.session.user

		if not self.subject:
			# fall back to a plain-text prefix of the content
			self.subject = strip_html((self.content or "")[:141])

		if not self.sent_or_received:
			self.seen = 1
			self.sent_or_received = "Sent"

		self.set_status()
		validate_email(self)

		if self.communication_medium == "Email":
			self.parse_email_for_timeline_links()
			self.set_timeline_links()
			self.deduplicate_timeline_links()

		self.set_sender_full_name()

	def validate_reference(self):
		"""Validate the reference document link and forbid circular chains."""
		if self.reference_doctype and self.reference_name:
			if not self.reference_owner:
				self.reference_owner = frappe.db.get_value(self.reference_doctype, self.reference_name, "owner")

			# prevent communication against a child table
			if frappe.get_meta(self.reference_doctype).istable:
				frappe.throw(_("Cannot create a {0} against a child document: {1}")
					.format(_(self.communication_type), _(self.reference_doctype)))

			# Prevent circular linking of Communication DocTypes
			if self.reference_doctype == "Communication":
				circular_linking = False
				doc = get_parent_doc(self)
				while doc.reference_doctype == "Communication":
					if get_parent_doc(doc).name==self.name:
						circular_linking = True
						break
					doc = get_parent_doc(doc)
				if circular_linking:
					frappe.throw(_("Please make sure the Reference Communication Docs are not circularly linked."), frappe.CircularLinkingError)

	def after_insert(self):
		"""Mark replied-to communications and notify listening clients."""
		if not (self.reference_doctype and self.reference_name):
			return

		if self.reference_doctype == "Communication" and self.sent_or_received == "Sent":
			frappe.db.set_value("Communication", self.reference_name, "status", "Replied")

		if self.communication_type == "Communication":
			# send new comment to listening clients
			frappe.publish_realtime('new_communication', self.as_dict(),
				doctype=self.reference_doctype, docname=self.reference_name,
				after_commit=True)

		elif self.communication_type in ("Chat", "Notification", "Bot"):
			if self.reference_name == frappe.session.user:
				# message addressed to the current user: broadcast it
				message = self.as_dict()
				message['broadcast'] = True
				frappe.publish_realtime('new_message', message, after_commit=True)
			else:
				# reference_name contains the user who is addressed in the messages' page comment
				frappe.publish_realtime('new_message', self.as_dict(),
					user=self.reference_name, after_commit=True)

	def on_update(self):
		# add to _comment property of the doctype, so it shows up in
		# comments count for the list view
		update_comment_in_doc(self)

		if self.comment_type != 'Updated':
			update_parent_document_on_communication(self)
			self.bot_reply()

	def on_trash(self):
		"""Notify listening clients that this communication was deleted."""
		if self.communication_type == "Communication":
			# send delete comment to listening clients
			frappe.publish_realtime('delete_communication', self.as_dict(),
				doctype= self.reference_doctype, docname = self.reference_name,
				after_commit=True)

	def set_status(self):
		"""Set initial status for a new communication; flag spam senders."""
		if not self.is_new():
			return

		if self.reference_doctype and self.reference_name:
			self.status = "Linked"
		elif self.communication_type=="Communication":
			self.status = "Open"
		else:
			self.status = "Closed"

		# set email status to spam
		email_rule = frappe.db.get_value("Email Rule", { "email_id": self.sender, "is_spam":1 })
		if self.communication_type == "Communication" and self.communication_medium == "Email" \
			and self.sent_or_received == "Sent" and email_rule:

			self.email_status = "Spam"

	def set_sender_full_name(self):
		"""Derive sender_full_name from User, Contact or the address itself."""
		if not self.sender_full_name and self.sender:
			if self.sender == "Administrator":
				self.sender_full_name = frappe.db.get_value("User", "Administrator", "full_name")
				self.sender = frappe.db.get_value("User", "Administrator", "email")
			elif self.sender == "Guest":
				self.sender_full_name = self.sender
				self.sender = None
			else:
				if self.sent_or_received=='Sent':
					validate_email_address(self.sender, throw=True)
				sender_name, sender_email = parse_addr(self.sender)
				if sender_name == sender_email:
					sender_name = None

				self.sender = sender_email
				self.sender_full_name = sender_name

				if not self.sender_full_name:
					self.sender_full_name = frappe.db.get_value('User', self.sender, 'full_name')

				if not self.sender_full_name:
					first_name, last_name = frappe.db.get_value('Contact',
						filters={'email_id': sender_email},
						fieldname=['first_name', 'last_name']
					) or [None, None]
					# NOTE(review): first and last name are concatenated
					# without a separator — confirm this is intended.
					self.sender_full_name = (first_name or '') + (last_name or '')

				if not self.sender_full_name:
					self.sender_full_name = sender_email

	def send(self, print_html=None, print_format=None, attachments=None,
		send_me_a_copy=False, recipients=None):
		"""Send communication via Email.

		:param print_html: Send given value as HTML attachment.
		:param print_format: Attach print format of parent document."""
		self.send_me_a_copy = send_me_a_copy
		self.notify(print_html, print_format, attachments, recipients)

	def notify(self, print_html=None, print_format=None, attachments=None,
		recipients=None, cc=None, bcc=None,fetched_from_email_account=False):
		"""Calls a delayed task 'sendmail' that enqueus email in Email Queue queue

		:param print_html: Send given value as HTML attachment
		:param print_format: Attach print format of parent document
		:param attachments: A list of filenames that should be attached when sending this email
		:param recipients: Email recipients
		:param cc: Send email as CC to
		:param fetched_from_email_account: True when pulling email, the notification shouldn't go to the main recipient
		"""
		notify(self, print_html, print_format, attachments, recipients, cc, bcc,
			fetched_from_email_account)

	def _notify(self, print_html=None, print_format=None, attachments=None,
		recipients=None, cc=None, bcc=None):
		"""Delegate to the module-level ``_notify`` (synchronous send)."""
		_notify(self, print_html, print_format, attachments, recipients, cc, bcc)

	def bot_reply(self):
		"""If this is a bot chat message, insert the bot's reply as a new Communication."""
		if self.comment_type == 'Bot' and self.communication_type == 'Chat':
			reply = BotReply().get_reply(self.content)
			if reply:
				frappe.get_doc({
					"doctype": "Communication",
					"comment_type": "Bot",
					"communication_type": "Bot",
					"content": cstr(reply),
					"reference_doctype": self.reference_doctype,
					"reference_name": self.reference_name
				}).insert()
				frappe.local.flags.commit = True

	def set_delivery_status(self, commit=False):
		'''Look into the status of Email Queue linked to this Communication and set the Delivery Status of this Communication'''
		delivery_status = None
		status_counts = Counter(frappe.db.sql_list('''select status from `tabEmail Queue` where communication=%s''', self.name))
		if self.sent_or_received == "Received":
			return

		# precedence: any pending -> Sending; any error/expiry beats Sent
		if status_counts.get('Not Sent') or status_counts.get('Sending'):
			delivery_status = 'Sending'

		elif status_counts.get('Error'):
			delivery_status = 'Error'

		elif status_counts.get('Expired'):
			delivery_status = 'Expired'

		elif status_counts.get('Sent'):
			delivery_status = 'Sent'

		if delivery_status:
			self.db_set('delivery_status', delivery_status)

			frappe.publish_realtime('update_communication', self.as_dict(),
				doctype=self.reference_doctype, docname=self.reference_name, after_commit=True)

			# for list views and forms
			self.notify_update()

			if commit:
				frappe.db.commit()

	def parse_email_for_timeline_links(self):
		"""Extract doctype/docname from plus-addressed recipients and link them."""
		parse_email(self, [self.recipients, self.cc, self.bcc])

	# Timeline Links
	def set_timeline_links(self):
		"""Link Contacts (and their dynamic links) found in the addresses."""
		contacts = []
		if (self.email_account and frappe.db.get_value("Email Account", self.email_account, "create_contact")) or \
			frappe.flags.in_test:
			contacts = get_contacts([self.sender, self.recipients, self.cc, self.bcc])

		for contact_name in contacts:
			self.add_link('Contact', contact_name)

			#link contact's dynamic links to communication
			add_contact_links_to_communication(self, contact_name)

	def deduplicate_timeline_links(self):
		"""Remove duplicate (doctype, name) pairs from timeline_links."""
		if self.timeline_links:
			links, duplicate = [], False

			for l in self.timeline_links:
				t = (l.link_doctype, l.link_name)
				if not t in links:
					links.append(t)
				else:
					duplicate = True

			if duplicate:
				del self.timeline_links[:] # make it python 2 compatible as list.clear() is python 3 only

				for l in links:
					self.add_link(link_doctype=l[0], link_name=l[1])

	def add_link(self, link_doctype, link_name, autosave=False):
		"""Append a timeline link row; optionally persist immediately."""
		self.append("timeline_links",
			{
				"link_doctype": link_doctype,
				"link_name": link_name
			}
		)

		if autosave:
			self.save(ignore_permissions=True)

	def get_links(self):
		"""Return the child rows of timeline links."""
		return self.timeline_links

	def remove_link(self, link_doctype, link_name, autosave=False, ignore_permissions=True):
		"""Remove matching timeline link rows; optionally persist immediately."""
		for l in self.timeline_links:
			if l.link_doctype == link_doctype and l.link_name == link_name:
				self.timeline_links.remove(l)

		if autosave:
			self.save(ignore_permissions=ignore_permissions)
def on_doctype_update():
	"""Add indexes in `tabCommunication`"""
	for index_columns in (
		["reference_doctype", "reference_name"],
		["status", "communication_type"],
	):
		frappe.db.add_index("Communication", index_columns)
def has_permission(doc, ptype, user):
	"""Allow read access when the user can read the referenced document."""
	if ptype != "read":
		return

	# a communication referencing itself gets no special grant
	if doc.reference_doctype == "Communication" and doc.reference_name == doc.name:
		return

	if not (doc.reference_doctype and doc.reference_name):
		return

	if frappe.has_permission(doc.reference_doctype, ptype="read", doc=doc.reference_name):
		return True
def get_permission_query_conditions_for_communication(user):
	"""Build the SQL condition restricting the Communication list for *user*."""
	if not user:
		user = frappe.session.user

	roles = frappe.get_roles(user)

	# privileged roles see everything — no extra condition
	if "Super Email User" in roles or "System Manager" in roles:
		return None

	accounts = frappe.get_all("User Email", filters={ "parent": user },
		fields=["email_account"],
		distinct=True, order_by="idx")

	if not accounts:
		# user has no linked email account: hide all email communications
		return """`tabCommunication`.communication_medium!='Email'"""

	quoted_accounts = ['"%s"' % account.get("email_account") for account in accounts]
	return """`tabCommunication`.email_account in ({email_accounts})""".format(
		email_accounts=','.join(quoted_accounts))
def get_contacts(email_strings):
	"""Resolve comma-separated email strings into a list of Contact names.

	Unknown addresses get a Contact created on the fly, named after the
	local part of the address; a failure to create one is logged and the
	address is skipped (best effort).
	"""
	email_addrs = []

	# flatten every comma-separated string into bare addresses
	for email_string in email_strings:
		if email_string:
			for email in email_string.split(","):
				parsed_email = parseaddr(email)[1]
				if parsed_email:
					email_addrs.append(parsed_email)

	contacts = []
	for email in email_addrs:
		# strip automatic-linking parts, e.g. a+dt+dn@x.com -> a@x.com
		email = get_email_without_link(email)
		contact_name = get_contact_name(email)

		if not contact_name and email:
			# no existing contact: create one named after the local part
			email_parts = email.split("@")
			first_name = frappe.unscrub(email_parts[0])

			try:
				# avoid clashing with the reserved doctype name "Contact"
				contact_name = '{0}-{1}'.format(first_name, email_parts[1]) if first_name == 'Contact' else first_name
				contact = frappe.get_doc({
					"doctype": "Contact",
					"first_name": contact_name,
					"name": contact_name
				})
				contact.add_email(email_id=email, is_primary=True)
				contact.insert(ignore_permissions=True)
				contact_name = contact.name
			except Exception:
				# best effort: log the failure and continue without a link
				traceback = frappe.get_traceback()
				frappe.log_error(traceback)

		if contact_name:
			contacts.append(contact_name)

	return contacts
def add_contact_links_to_communication(communication, contact_name):
	"""Copy the contact's dynamic links onto the communication's timeline."""
	contact_links = frappe.get_list("Dynamic Link", filters={
		"parenttype": "Contact",
		"parent": contact_name
	}, fields=["link_doctype", "link_name"])

	for contact_link in (contact_links or []):
		communication.add_link(contact_link.link_doctype, contact_link.link_name)
def parse_email(communication, email_strings):
	"""
	Parse email to add timeline links.
	When automatic email linking is enabled, an email from email_strings can contain
	a doctype and docname ie in the format `admin+doctype+docname@example.com`,
	the email is parsed and doctype and docname is extracted and timeline link is added.
	"""
	if not frappe.get_all("Email Account", filters={"enable_automatic_linking": 1}):
		return

	delimiter = "+"

	for email_string in email_strings:
		if not email_string:
			continue

		for address in email_string.split(","):
			if delimiter not in address:
				continue

			# only the local part (before @) carries the link information
			local_part = address.split("@")[0]
			parts = local_part.split(delimiter)
			if len(parts) != 3:
				continue

			doctype = unquote(parts[1])
			docname = unquote(parts[2])
			if doctype and docname and frappe.db.exists(doctype, docname):
				communication.add_link(doctype, docname)
def get_email_without_link(email):
	"""
	returns email address without doctype links
	returns admin@example.com for email admin+doctype+docname@example.com
	"""
	if not frappe.get_all("Email Account", filters={"enable_automatic_linking": 1}):
		return email

	# BUG FIX: an address without a domain part previously raised IndexError
	# on the host lookup below; return such malformed input unchanged.
	if "@" not in email:
		return email

	email_id = email.split("@")[0].split("+")[0]
	email_host = email.split("@")[1]
	return "{0}@{1}".format(email_id, email_host)
def update_parent_document_on_communication(doc):
	"""Update mins_to_first_communication of parent document based on who is replying."""
	parent = get_parent_doc(doc)
	if not parent:
		return

	# update parent mins_to_first_communication only if we create the Email communication
	# ignore in case of only Comment is added
	if doc.communication_type == "Comment":
		return

	status_field = parent.meta.get_field("status")
	if status_field:
		options = (status_field.options or "").splitlines()

		# if status has a "Replied" option, then update the status for received communication
		if ("Replied" in options) and doc.sent_or_received == "Received":
			parent.db_set("status", "Open")
			parent.run_method("handle_hold_time", "Replied")
			# re-run assignment rules so the reopened document is routed again
			apply_assignment_rule(parent)
		else:
			# update the modified date for document
			parent.update_modified()

	# response-time metrics are only stored on parents that declare the fields
	update_first_response_time(parent, doc)
	set_avg_response_time(parent, doc)
	parent.run_method("notify_communication", doc)
	parent.notify_update()
def update_first_response_time(parent, communication):
	"""Record when the first system-user response to *parent* happened."""
	if not parent.meta.has_field("first_response_time"):
		return
	if parent.get("first_response_time"):
		return  # already recorded, never overwrite
	if not is_system_user(communication.sender):
		return

	first_responded_on = communication.creation
	if parent.meta.has_field("first_responded_on") and communication.sent_or_received == "Sent":
		parent.db_set("first_responded_on", first_responded_on)
	parent.db_set(
		"first_response_time",
		round(time_diff_in_seconds(first_responded_on, parent.creation), 2),
	)
def set_avg_response_time(parent, communication):
	"""Compute and store the average response time on *parent*.

	A response is a "Sent" communication immediately following a "Received"
	one; the mean of those gaps (seconds) is stored in ``avg_response_time``.
	"""
	if parent.meta.has_field("avg_response_time") and communication.sent_or_received == "Sent":
		# all communications against the parent, oldest first
		communications = frappe.get_list("Communication", filters={
				"reference_doctype": parent.doctype,
				"reference_name": parent.name
			},
			fields=["sent_or_received", "name", "creation"],
			order_by="creation"
		)

		response_times = []
		# BUG FIX: the original loop started at i=0, so communications[i-1]
		# wrapped around to the *last* entry, wrongly pairing the first
		# message with the most recent one. Start at 1 to compare only
		# adjacent pairs.
		for i in range(1, len(communications)):
			if communications[i].sent_or_received == "Sent" and communications[i-1].sent_or_received == "Received":
				response_time = round(time_diff_in_seconds(communications[i].creation, communications[i-1].creation), 2)
				if response_time > 0:
					response_times.append(response_time)

		if response_times:
			avg_response_time = sum(response_times) / len(response_times)
			parent.db_set("avg_response_time", avg_response_time)
| |
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import subprocess
import tempfile
import time
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.drivers import utils as driver_utils
CONF = cfg.CONF
# Reuse retry/interval options declared by the native IPMI driver so both
# drivers share one configuration surface.
CONF.import_opt('retry_timeout',
                'ironic.drivers.modules.ipminative',
                group='ipmi')
CONF.import_opt('min_command_interval',
                'ironic.drivers.modules.ipminative',
                group='ipmi')

LOG = logging.getLogger(__name__)

# Privilege levels accepted for the ipmi_priv_level driver_info property.
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']

# IPMI protocol versions accepted in ipmi_protocol_version.
VALID_PROTO_VERSIONS = ('2.0', '1.5')

REQUIRED_PROPERTIES = {
    'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
    'ipmi_password': _("password. Optional."),
    'ipmi_port': _("remote IPMI RMCP port. Optional."),
    'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
                         "%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
    'ipmi_username': _("username; default is NULL user. Optional."),
    'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
                       "\"dual\", \"no\". Optional."),
    'ipmi_transit_channel': _("transit channel for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_transit_address': _("transit address for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_target_channel': _("destination channel for bridged request. "
                             "Required only if ipmi_bridging is set to "
                             "\"single\" or \"dual\"."),
    'ipmi_target_address': _("destination address for bridged request. "
                             "Required only if ipmi_bridging is set "
                             "to \"single\" or \"dual\"."),
    'ipmi_local_address': _("local IPMB address for bridged requests. "
                            "Used only if ipmi_bridging is set "
                            "to \"single\" or \"dual\". Optional."),
    'ipmi_protocol_version': _('the version of the IPMI protocol; default '
                               'is "2.0". One of "1.5", "2.0". Optional.'),
    'ipmi_force_boot_device': _("Whether Ironic should specify the boot "
                                "device to the BMC each time the server "
                                "is turned on, eg. because the BMC is not "
                                "capable of remembering the selected boot "
                                "device across power cycles; default value "
                                "is False. Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
    'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
                            "console access.")
}

# (driver_info suffix, ipmitool flag) pairs used when building bridged
# ipmitool invocations.
BRIDGING_OPTIONS = [('local_address', '-m'),
                    ('transit_channel', '-B'), ('transit_address', '-T'),
                    ('target_channel', '-b'), ('target_address', '-t')]

# NOTE(review): used elsewhere in this module — presumably tracks the last
# ipmitool invocation time per node to honour min_command_interval; confirm.
LAST_CMD_TIME = {}

# Tri-state caches (None = not probed yet) for ipmitool feature detection;
# see _is_option_supported().
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None

# Tri-state flag — presumably records whether CONF.tempdir was already
# validated; set elsewhere in this module (not visible here).
TMP_DIR_CHECKED = None

# Probe commands ('-h' keeps them side-effect free) used to detect whether
# the installed ipmitool understands timing and bridging options.
ipmitool_command_options = {
    'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
    'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
    'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
                    '-B', '0', '-T', '0', '-h']}

# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller. As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
    """Probe the local ipmitool binary for support of the given options.

    Updates the module-level support flags (through ``_is_option_supported``)
    so that any driver interface class in this module can consult them later.
    It is intended to be called from the __init__ method of such classes only.

    :param options: list of ipmitool option names to check
    :raises: OSError
    """
    for option in options:
        if _is_option_supported(option) is not None:
            continue  # already probed earlier

        command = ipmitool_command_options[option]
        try:
            # NOTE(cinerama): use subprocess.check_call to
            # check options & suppress ipmitool output to
            # avoid alarming people
            with open(os.devnull, 'wb') as devnull:
                subprocess.check_call(command, stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            LOG.info(_LI("Option %(opt)s is not supported by ipmitool"),
                     {'opt': option})
            _is_option_supported(option, False)
        else:
            LOG.info(_LI("Option %(opt)s is supported by ipmitool"),
                     {'opt': option})
            _is_option_supported(option, True)
def _is_option_supported(option, is_supported=None):
    """Indicates whether the particular ipmitool option is supported.

    :param option: specific ipmitool option
    :param is_supported: Optional Boolean. when specified, this value
                         is assigned to the module-level variable indicating
                         whether the option is supported. Used only if a value
                         is not already assigned.
    :returns: True, indicates the option is supported
    :returns: False, indicates the option is not supported
    :returns: None, indicates that it is not aware whether the option
              is supported
    """
    global SINGLE_BRIDGE_SUPPORT
    global DUAL_BRIDGE_SUPPORT
    global TIMING_SUPPORT

    if option == 'single_bridge':
        # write-once: only the first reported value is recorded
        if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
            SINGLE_BRIDGE_SUPPORT = is_supported
        return SINGLE_BRIDGE_SUPPORT
    elif option == 'dual_bridge':
        if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
            DUAL_BRIDGE_SUPPORT = is_supported
        return DUAL_BRIDGE_SUPPORT
    elif option == 'timing':
        if (TIMING_SUPPORT is None) and (is_supported is not None):
            TIMING_SUPPORT = is_supported
        return TIMING_SUPPORT
    # NOTE(review): an unknown option falls through and returns None
    # implicitly — presumably intentional; confirm callers rely on it.
def _console_pwfile_path(uuid):
    """Return the file path for storing the ipmi password for a console."""
    return os.path.join(CONF.tempdir, '%(uuid)s.pw' % {'uuid': uuid})
@contextlib.contextmanager
def _make_password_file(password):
    """Makes a temporary file that contains the password.

    :param password: the password
    :returns: the absolute pathname of the temporary file
    :raises: PasswordFileFailedToCreate from creating or writing to the
             temporary file
    """
    f = None
    try:
        f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
        f.write(str(password))
        f.flush()
    except (IOError, OSError) as exc:
        # close before re-raising so the temp file is removed promptly
        if f is not None:
            f.close()
        raise exception.PasswordFileFailedToCreate(error=exc)
    except Exception:
        # unexpected error: clean up but re-raise the original exception
        with excutils.save_and_reraise_exception():
            if f is not None:
                f.close()

    try:
        # NOTE(jlvillal): This yield can not be in the try/except block above
        # because an exception by the caller of this function would then get
        # changed to a PasswordFileFailedToCreate exception which would mislead
        # about the problem and its cause.
        yield f.name
    finally:
        # NamedTemporaryFile deletes the file on close
        if f is not None:
            f.close()
def _parse_driver_info(node):
    """Gets the parameters required for ipmitool to access the node.

    :param node: the Node of interest.
    :returns: dictionary of parameters.
    :raises: InvalidParameterValue when an invalid value is specified
    :raises: MissingParameterValue when a required ipmi parameter is missing.
    """
    info = node.driver_info or {}
    bridging_types = ['single', 'dual']
    # All REQUIRED_PROPERTIES keys must be present and non-empty.
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "Missing the following IPMI credentials in node's"
            " driver_info: %s.") % missing_info)

    address = info.get('ipmi_address')
    username = info.get('ipmi_username')
    # Coerce to text: the password may have been stored as a number.
    password = six.text_type(info.get('ipmi_password', ''))
    dest_port = info.get('ipmi_port')
    port = info.get('ipmi_terminal_port')
    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
    bridging_type = info.get('ipmi_bridging', 'no')
    local_address = info.get('ipmi_local_address')
    transit_channel = info.get('ipmi_transit_channel')
    transit_address = info.get('ipmi_transit_address')
    target_channel = info.get('ipmi_target_channel')
    target_address = info.get('ipmi_target_address')
    protocol_version = str(info.get('ipmi_protocol_version', '2.0'))
    force_boot_device = info.get('ipmi_force_boot_device', False)

    if protocol_version not in VALID_PROTO_VERSIONS:
        valid_versions = ', '.join(VALID_PROTO_VERSIONS)
        raise exception.InvalidParameterValue(_(
            "Invalid IPMI protocol version value %(version)s, the valid "
            "value can be one of %(valid_versions)s") %
            {'version': protocol_version, 'valid_versions': valid_versions})

    if port is not None:
        port = utils.validate_network_port(port, 'ipmi_terminal_port')

    if dest_port is not None:
        dest_port = utils.validate_network_port(dest_port, 'ipmi_port')

    # check if ipmi_bridging has proper value
    if bridging_type == 'no':
        # if bridging is not selected, then set all bridging params to None
        (local_address, transit_channel, transit_address, target_channel,
         target_address) = (None,) * 5
    elif bridging_type in bridging_types:
        # check if the particular bridging option is supported on host
        if not _is_option_supported('%s_bridge' % bridging_type):
            raise exception.InvalidParameterValue(_(
                "Value for ipmi_bridging is provided as %s, but IPMI "
                "bridging is not supported by the IPMI utility installed "
                "on host. Ensure ipmitool version is > 1.8.11"
            ) % bridging_type)

        # ensure that all the required parameters are provided
        params_undefined = [param for param, value in [
            ("ipmi_target_channel", target_channel),
            ('ipmi_target_address', target_address)] if value is None]
        if bridging_type == 'dual':
            # dual bridging additionally requires the transit pair
            params_undefined2 = [param for param, value in [
                ("ipmi_transit_channel", transit_channel),
                ('ipmi_transit_address', transit_address)
            ] if value is None]
            params_undefined.extend(params_undefined2)
        else:
            # if single bridging was selected, set dual bridge params to None
            transit_channel = transit_address = None

        # If the required parameters were not provided,
        # raise an exception
        if params_undefined:
            raise exception.MissingParameterValue(_(
                "%(param)s not provided") % {'param': params_undefined})
    else:
        raise exception.InvalidParameterValue(_(
            "Invalid value for ipmi_bridging: %(bridging_type)s,"
            " the valid value can be one of: %(bridging_types)s"
        ) % {'bridging_type': bridging_type,
             'bridging_types': bridging_types + ['no']})

    if priv_level not in VALID_PRIV_LEVELS:
        valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
        raise exception.InvalidParameterValue(_(
            "Invalid privilege level value:%(priv_level)s, the valid value"
            " can be one of %(valid_levels)s") %
            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})

    return {
        'address': address,
        'dest_port': dest_port,
        'username': username,
        'password': password,
        'port': port,
        'uuid': node.uuid,
        'priv_level': priv_level,
        'local_address': local_address,
        'transit_channel': transit_channel,
        'transit_address': transit_address,
        'target_channel': target_channel,
        'target_address': target_address,
        'protocol_version': protocol_version,
        'force_boot_device': force_boot_device,
    }
def _exec_ipmitool(driver_info, command):
    """Execute the ipmitool command.

    :param driver_info: the ipmitool parameters for accessing a node.
    :param command: the ipmitool command to be executed.
    :returns: (stdout, stderr) from executing the command.
    :raises: PasswordFileFailedToCreate from creating or writing to the
        temporary file.
    :raises: processutils.ProcessExecutionError from executing the command.
    """
    ipmi_version = ('lanplus'
                    if driver_info['protocol_version'] == '2.0'
                    else 'lan')
    args = ['ipmitool',
            '-I',
            ipmi_version,
            '-H',
            driver_info['address'],
            '-L', driver_info['priv_level']
            ]
    if driver_info['dest_port']:
        args.append('-p')
        args.append(driver_info['dest_port'])
    if driver_info['username']:
        args.append('-U')
        args.append(driver_info['username'])

    for name, option in BRIDGING_OPTIONS:
        if driver_info[name] is not None:
            args.append(option)
            args.append(driver_info[name])

    # specify retry timing more precisely, if supported
    num_tries = max(
        (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)

    if _is_option_supported('timing'):
        args.append('-R')
        args.append(str(num_tries))
        args.append('-N')
        args.append(str(CONF.ipmi.min_command_interval))

    end_time = (time.time() + CONF.ipmi.retry_timeout)

    while True:
        num_tries = num_tries - 1
        # NOTE(deva): ensure that no communications are sent to a BMC more
        #             often than once every min_command_interval seconds.
        time_till_next_poll = CONF.ipmi.min_command_interval - (
            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
        if time_till_next_poll > 0:
            time.sleep(time_till_next_poll)
        # Copy the base arguments on every attempt so the password-file
        # arguments ('-f <path>') appended by a previous iteration are NOT
        # carried over into this one.
        cmd_args = args[:]
        # 'ipmitool' command will prompt password if there is no '-f'
        # option, we set it to '\0' to write a password file to support
        # empty password
        with _make_password_file(driver_info['password'] or '\0') as pw_file:
            cmd_args.append('-f')
            cmd_args.append(pw_file)
            cmd_args.extend(command.split(" "))
            try:
                out, err = utils.execute(*cmd_args)
                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
                                if x in six.text_type(e)]
                    # Give up (re-raise) when the retry budget is exhausted
                    # or the failure is not a known-retryable one.
                    if ((time.time() > end_time) or
                        (num_tries == 0) or
                        not err_list):
                        # NOTE: a space was missing between '"%(cmd)s"' and
                        # 'for node' in the original message.
                        LOG.error(_LE('IPMI Error while attempting "%(cmd)s" '
                                      'for node %(node)s. Error: %(error)s'), {
                            'node': driver_info['uuid'],
                            'cmd': e.cmd, 'error': e
                        })
                    else:
                        # Retryable failure with budget left: swallow and loop.
                        ctxt.reraise = False
                        LOG.warning(_LW('IPMI Error encountered, retrying '
                                        '"%(cmd)s" for node %(node)s. '
                                        'Error: %(error)s'), {
                            'node': driver_info['uuid'],
                            'cmd': e.cmd, 'error': e
                        })
            finally:
                LAST_CMD_TIME[driver_info['address']] = time.time()
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
This implementation increases exponentially.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
def _set_and_wait(target_state, driver_info):
    """Helper function for DynamicLoopingCall.

    This method changes the power state and polls the BMC until the desired
    power state is reached, or CONF.ipmi.retry_timeout would be exceeded by the
    next iteration.

    This method assumes the caller knows the current power state and does not
    check it prior to changing the power state. Most BMCs should be fine, but
    if a driver is concerned, the state should be checked prior to calling this
    method.

    :param target_state: desired power state
    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states
    """
    if target_state == states.POWER_ON:
        state_name = "on"
    elif target_state == states.POWER_OFF:
        state_name = "off"
    # NOTE(review): any other target_state leaves state_name unbound and would
    # fail later with NameError; callers only pass POWER_ON/POWER_OFF today.

    def _wait(mutable):
        # Loop body invoked repeatedly by DynamicLoopingCall; 'mutable'
        # carries iteration state across calls.
        try:
            # Only issue power change command once
            if mutable['iter'] < 0:
                _exec_ipmitool(driver_info, "power %s" % state_name)
            else:
                mutable['power'] = _power_status(driver_info)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError,
                exception.IPMIFailure):
            # Log failures but keep trying
            LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
                        {'state': state_name, 'node': driver_info['uuid']})
        finally:
            mutable['iter'] += 1

        if mutable['power'] == target_state:
            raise loopingcall.LoopingCallDone()

        sleep_time = _sleep_time(mutable['iter'])
        if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
            # Stop if the next loop would exceed maximum retry_timeout
            LOG.error(_LE('IPMI power %(state)s timed out after '
                          '%(tries)s retries on node %(node_id)s.'),
                      {'state': state_name, 'tries': mutable['iter'],
                       'node_id': driver_info['uuid']})
            mutable['power'] = states.ERROR
            raise loopingcall.LoopingCallDone()
        else:
            mutable['total_time'] += sleep_time
            return sleep_time

    # Use mutable objects so the looped method can change them.
    # Start 'iter' from -1 so that the first two checks are one second apart.
    status = {'power': None, 'iter': -1, 'total_time': 0}

    timer = loopingcall.DynamicLoopingCall(_wait, status)
    timer.start().wait()
    return status['power']
def _power_on(driver_info):
    """Power a node on and wait for the state change to take effect.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
    """Power a node off and wait for the state change to take effect.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_OFF or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
    """Get the power status for a node.

    :param driver_info: the ipmitool access parameters for a node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool.
    """
    cmd = "power status"
    try:
        out_err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
                        "error: %(error)s."),
                    {'node_id': driver_info['uuid'], 'error': e})
        raise exception.IPMIFailure(cmd=cmd)

    # Map ipmitool's exact stdout lines onto ironic power states; anything
    # unrecognized is reported as ERROR.
    status_map = {
        "Chassis Power is on\n": states.POWER_ON,
        "Chassis Power is off\n": states.POWER_OFF,
    }
    return status_map.get(out_err[0], states.ERROR)
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# Have only three sensor type name IDs: 'Sensor Type (Analog)'
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"),
{'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
    """Parse the IPMI sensors data and format to the dict grouping by type.

    We run 'ipmitool' command with 'sdr -v' options, which can return sensor
    details in human-readable format, we need to format them to JSON string
    dict-based data for Ceilometer Collector which can be sent it as payload
    out via notification bus and consumed by Ceilometer Collector.

    :param sensors_data: the sensor data returned by ipmitool command.
    :returns: the sensor data with JSON format, grouped by sensor type.
    :raises: FailedToParseSensorData when error encountered during parsing.
    """
    grouped = {}
    if not sensors_data:
        return grouped

    # Individual sensors are separated by blank lines in 'sdr -v' output.
    for block in sensors_data.split('\n\n'):
        fields = _process_sensor(block)
        if not fields:
            continue

        sensor_type = _get_sensor_type(node, fields)

        # ignore the sensors which has no current 'Sensor Reading' data
        if 'Sensor Reading' in fields:
            grouped.setdefault(sensor_type, {})[fields['Sensor ID']] = fields

    # get nothing, no valid sensor data
    if not grouped:
        raise exception.FailedToParseSensorData(
            node=node.uuid,
            error=(_("parse ipmi sensor data failed, get nothing with input"
                     " data: %(sensors_data)s")
                   % {'sensors_data': sensors_data}))
    return grouped
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
    """Send raw bytes to the BMC. Bytes should be a string of bytes.

    :param task: a TaskManager instance.
    :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
    :raises: IPMIFailure on an error from ipmitool.
    :raises: MissingParameterValue if a required parameter is missing.
    :raises: InvalidParameterValue when an invalid value is specified.
    """
    node_uuid = task.node.uuid
    LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
              {'bytes': raw_bytes, 'node': node_uuid})
    driver_info = _parse_driver_info(task.node)
    cmd = 'raw %s' % raw_bytes

    try:
        out, err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
                          'with error: %(error)s.'),
                      {'node_id': node_uuid, 'error': e})
        raise exception.IPMIFailure(cmd=cmd)
    else:
        LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
                  ' %(stderr)s', {'stdout': out, 'stderr': err})
def _check_temp_dir():
    """Check for Valid temp directory."""
    global TMP_DIR_CHECKED
    # because a temporary file is used to pass the password to ipmitool,
    # we should check the directory
    if TMP_DIR_CHECKED is not None:
        # Already checked (either outcome) earlier in this process.
        return
    try:
        utils.check_dir()
    except (exception.PathNotFound,
            exception.DirectoryNotWritable,
            exception.InsufficientDiskSpace) as e:
        with excutils.save_and_reraise_exception():
            TMP_DIR_CHECKED = False
            err_msg = (_("Ipmitool drivers need to be able to create "
                         "temporary files to pass password to ipmitool. "
                         "Encountered error: %s") % e)
            e.message = err_msg
            LOG.error(err_msg)
    else:
        TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
    """Power interface driven by the ipmitool command-line utility."""

    def __init__(self):
        # Fail driver load early if a usable ipmitool binary is not
        # available; the option check shells out to ipmitool.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        # Password files are written to a temp dir; make sure it is usable.
        _check_temp_dir()

    def get_properties(self):
        # driver_info properties common to all ipmitool interfaces.
        return COMMON_PROPERTIES

    def validate(self, task):
        """Validate driver_info for ipmitool driver.

        Check that node['driver_info'] contains IPMI credentials.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)
        # NOTE(deva): don't actually touch the BMC in validate because it is
        #             called too often, and BMCs are too fragile.
        #             This is a temporary measure to mitigate problems while
        #             1314954 and 1314961 are resolved.

    def get_power_state(self, task):
        """Get the current power state of the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: IPMIFailure on an error from ipmitool (from _power_status
            call).
        """
        driver_info = _parse_driver_info(task.node)
        return _power_status(driver_info)

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: The desired power state, one of ironic.common.states
            POWER_ON, POWER_OFF.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: MissingParameterValue if required ipmi parameters are missing
        :raises: PowerStateFailure if the power couldn't be set to pstate.
        """
        driver_info = _parse_driver_info(task.node)

        if pstate == states.POWER_ON:
            # Apply any queued one-time boot device before powering on.
            driver_utils.ensure_next_boot_device(task, driver_info)
            state = _power_on(driver_info)
        elif pstate == states.POWER_OFF:
            state = _power_off(driver_info)
        else:
            raise exception.InvalidParameterValue(
                _("set_power_state called "
                  "with invalid power state %s.") % pstate)

        if state != pstate:
            raise exception.PowerStateFailure(pstate=pstate)

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Cycles the power to the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.
        """
        driver_info = _parse_driver_info(task.node)
        # Hard power cycle: force off, restore the boot device, power on.
        _power_off(driver_info)
        driver_utils.ensure_next_boot_device(task, driver_info)
        state = _power_on(driver_info)

        if state != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
    """Management interface (boot device, sensors) backed by ipmitool."""

    def get_properties(self):
        # driver_info properties common to all ipmitool interfaces.
        return COMMON_PROPERTIES

    def __init__(self):
        # Fail driver load early if a usable ipmitool binary is not available.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        # Password files are written to a temp dir; make sure it is usable.
        _check_temp_dir()

    def validate(self, task):
        """Check that 'driver_info' contains IPMI credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self, task):
        """Get a list of the supported boot devices.

        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
                  in :mod:`ironic.common.boot_devices`.
        """
        return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
                boot_devices.BIOS, boot_devices.SAFE]

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
                       :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
                           persist to all future boots, False if not.
                           Default: False.
        :raises: InvalidParameterValue if an invalid boot device is specified
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: IPMIFailure on an error from ipmitool.
        """
        if device not in self.get_supported_boot_devices(task):
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)

        # note(JayF): IPMI spec indicates unless you send these raw bytes the
        # boot device setting times out after 60s. Since it's possible it
        # could be >60s before a node is rebooted, we should always send them.
        # This mimics pyghmi's current behavior, and the "option=timeout"
        # setting on newer ipmitool binaries.
        timeout_disable = "0x00 0x08 0x03 0x08"
        send_raw(task, timeout_disable)

        if task.node.driver_info.get('ipmi_force_boot_device', False):
            driver_utils.force_persistent_boot(task,
                                               device,
                                               persistent)
            # Reset persistent to False, in case of BMC does not support
            # persistent or we do not have admin rights.
            persistent = False

        cmd = "chassis bootdev %s" % device
        if persistent:
            cmd = cmd + " options=persistent"
        driver_info = _parse_driver_info(task.node)
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: a dictionary containing:

            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        """
        driver_info = task.node.driver_info
        driver_internal_info = task.node.driver_internal_info
        # If a forced boot device is recorded for the next boot, report it
        # directly without querying the BMC.
        if (driver_info.get('ipmi_force_boot_device', False) and
                driver_internal_info.get('persistent_boot_device') and
                driver_internal_info.get('is_next_boot_persistent', True)):
            return {
                'boot_device': driver_internal_info['persistent_boot_device'],
                'persistent': True
            }

        cmd = "chassis bootparam get 5"
        driver_info = _parse_driver_info(task.node)
        response = {'boot_device': None, 'persistent': None}
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

        # Map the human-readable "Boot Device Selector" line of the
        # bootparam output onto the ironic boot_devices constants.
        re_obj = re.search('Boot Device Selector : (.+)?\n', out)
        if re_obj:
            boot_selector = re_obj.groups('')[0]
            if 'PXE' in boot_selector:
                response['boot_device'] = boot_devices.PXE
            elif 'Hard-Drive' in boot_selector:
                if 'Safe-Mode' in boot_selector:
                    response['boot_device'] = boot_devices.SAFE
                else:
                    response['boot_device'] = boot_devices.DISK
            elif 'BIOS' in boot_selector:
                response['boot_device'] = boot_devices.BIOS
            elif 'CD/DVD' in boot_selector:
                response['boot_device'] = boot_devices.CDROM

        response['persistent'] = 'Options apply to all future boots' in out
        return response

    def get_sensors_data(self, task):
        """Get sensors data.

        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.
        """
        driver_info = _parse_driver_info(task.node)
        # with '-v' option, we can get the entire sensor data including the
        # extended sensor informations
        cmd = "sdr -v"
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            raise exception.FailedToGetSensorData(node=task.node.uuid,
                                                  error=e)

        return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
    """Vendor passthru interface exposing raw IPMI commands and BMC reset."""

    def __init__(self):
        # Fail driver load early if a usable ipmitool binary is not available.
        try:
            _check_option_support(['single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        # Password files are written to a temp dir; make sure it is usable.
        _check_temp_dir()

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def send_raw(self, task, http_method, raw_bytes):
        """Send raw bytes to the BMC. Bytes should be a string of bytes.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified.
        """
        # Delegates to the module-level send_raw() helper.
        send_raw(task, raw_bytes)

    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def bmc_reset(self, task, http_method, warm=True):
        """Reset BMC with IPMI command 'bmc reset (warm|cold)'.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param warm: boolean parameter to decide on warm or cold reset.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified
        """
        node_uuid = task.node.uuid

        if warm:
            warm_param = 'warm'
        else:
            warm_param = 'cold'

        LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
                  {'warm': warm_param, 'node': node_uuid})
        driver_info = _parse_driver_info(task.node)
        cmd = 'bmc reset %s' % warm_param

        try:
            out, err = _exec_ipmitool(driver_info, cmd)
            LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
                      ' %(stderr)s', {'stdout': out, 'stderr': err})
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
                              'with error: %(error)s.'),
                          {'node_id': node_uuid, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)

    def get_properties(self):
        # driver_info properties common to all ipmitool interfaces.
        return COMMON_PROPERTIES

    def validate(self, task, method, **kwargs):
        """Validate vendor-specific actions.

        If invalid, raises an exception; otherwise returns None.

        Valid methods:
          * send_raw
          * bmc_reset

        :param task: a task from TaskManager.
        :param method: method to be validated
        :param kwargs: info for action.
        :raises: InvalidParameterValue when an invalid parameter value is
                 specified.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        if method == 'send_raw':
            if not kwargs.get('raw_bytes'):
                raise exception.MissingParameterValue(_(
                    'Parameter raw_bytes (string of bytes) was not '
                    'specified.'))

        # Both methods require valid IPMI credentials.
        _parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses ipmitool and shellinabox."""

    def __init__(self):
        # Fail driver load early if a usable ipmitool binary is not available.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
        # Password files are written to a temp dir; make sure it is usable.
        _check_temp_dir()

    def get_properties(self):
        # Console support adds CONSOLE_PROPERTIES on top of the common ones.
        d = COMMON_PROPERTIES.copy()
        d.update(CONSOLE_PROPERTIES)
        return d

    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue
        :raises: MissingParameterValue when a required parameter is missing
        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'ipmi_terminal_port' parameter in node's"
                " driver_info."))

        # Serial-over-LAN ('sol activate') requires the lanplus interface,
        # which is only selected for protocol version 2.0.
        if driver_info['protocol_version'] != '2.0':
            raise exception.InvalidParameterValue(_(
                "Serial over lan only works with IPMI protocol version 2.0. "
                "Check the 'ipmi_protocol_version' parameter in "
                "node's driver_info"))

    def start_console(self, task):
        """Start a remote console for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: PasswordFileFailedToCreate if unable to create a file
                 containing the password
        :raises: ConsoleError if the directory for the PID file cannot be
                 created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        """
        driver_info = _parse_driver_info(task.node)

        # The console process outlives this request, so the password goes in
        # a persistent file (unlike the ephemeral _make_password_file one).
        path = _console_pwfile_path(driver_info['uuid'])
        pw_file = console_utils.make_persistent_password_file(
            path, driver_info['password'] or '\0')

        ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
                    " -I lanplus -U %(user)s -f %(pwfile)s"
                    % {'uid': os.getuid(),
                       'gid': os.getgid(),
                       'address': driver_info['address'],
                       'user': driver_info['username'],
                       'pwfile': pw_file})

        for name, option in BRIDGING_OPTIONS:
            if driver_info[name] is not None:
                ipmi_cmd = " ".join([ipmi_cmd,
                                     option, driver_info[name]])

        if CONF.debug:
            ipmi_cmd += " -v"
        ipmi_cmd += " sol activate"
        try:
            console_utils.start_shellinabox_console(driver_info['uuid'],
                                                    driver_info['port'],
                                                    ipmi_cmd)
        except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
            with excutils.save_and_reraise_exception():
                # Don't leave the password file behind on failure to start.
                ironic_utils.unlink_without_raise(path)

    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: ConsoleError if unable to stop the console
        """
        try:
            console_utils.stop_shellinabox_console(task.node.uuid)
        finally:
            # Always remove the persistent password file.
            ironic_utils.unlink_without_raise(
                _console_pwfile_path(task.node.uuid))

    def get_console(self, task):
        """Get the type and connection information about the console."""
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}
| |
import unittest
import pytz
import re
from pprint import pprint
from datetime import datetime, timedelta
from flightaware.client import Client
import ConfigParser

# NOTE: this is Python 2 code (print statements, ConfigParser module).
# Test credentials are read from an INI file so they are never committed.
config = ConfigParser.RawConfigParser()
config.read("developer.cfg")
username = config.get("test settings", "username")
api_key = config.get("test settings", "api_key")
# When true, each test pretty-prints the raw API response.
verbose = config.getboolean("test settings", "verbose")

print "Using username => %s" % username
print "Using api_key => %s" % api_key
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
    # A fresh authenticated FlightAware client for every test.
    self.client = Client(username=username, api_key=api_key)
def tearDown(self):
    # Nothing to clean up; the client holds no open resources.
    pass
#
# Combined API tests
#
def test_basic_calls(self):
    """Smoke-test several endpoints: airlines, airports, operation counts."""
    for response in (self.client.all_airlines(),
                     self.client.all_airports(),
                     self.client.count_airport_operations("BNA")):
        self.assertNotIn("error", response)
def test_weather_calls(self):
    """NTAF and TAF weather endpoints answer without an error key."""
    for response in (self.client.ntaf("BNA"), self.client.taf("BNA")):
        self.assertNotIn("error", response)
#
# Individual API tests
#
def test_aircraft_type(self):
    """Look up a known aircraft type code (GALX)."""
    response = self.client.aircraft_type("GALX")
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_airline_flight_info(self):
    """Resolve a tail number to a flight id, then fetch its flight info."""
    flight_id = self.client.get_flight_id("N415PW", 1442008560)
    if verbose:
        pprint(flight_id)
    response = self.client.airline_flight_info(flight_id)
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_airline_flight_schedules(self):
    """Scheduled KSFO->KLAX flights 2-3 days out carry both timestamps."""
    window_start = datetime.now(tz=pytz.utc) + timedelta(days=2)
    window_end = datetime.now(tz=pytz.utc) + timedelta(days=3)
    schedules = self.client.airline_flight_schedules(
        start_date=window_start,
        end_date=window_end,
        origin="KSFO",
        destination="KLAX",
    )
    if verbose:
        pprint(schedules)
    self.assertNotIn("error", schedules)
    for schedule in schedules:
        self.assertIn("arrival_time", schedule)
        self.assertIn("departure_time", schedule)
def test_airline_info(self):
    """Airline metadata lookup for Southwest (SWA)."""
    response = self.client.airline_info("SWA")
    self.assertNotIn("error", response)
def test_airline_insight(self):
    """Airline insight data between BNA and ATL."""
    response = self.client.airline_insight("BNA", "ATL")
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_airport_info(self):
    """A bogus airport code errors; both BNA and KBNA resolve cleanly."""
    bad = self.client.airport_info("KasdfBNA")
    if verbose:
        pprint(bad)
    self.assertIn("error", bad)
    for code in ("BNA", "KBNA"):
        response = self.client.airport_info(code)
        if verbose:
            pprint(response)
        self.assertNotIn("error", response)
def test_all_airlines(self):
    """Full airline directory downloads without an error key."""
    response = self.client.all_airlines()
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_all_airports(self):
    """Full airport directory downloads without an error key."""
    response = self.client.all_airports()
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_arrived(self):
    """Recent arrivals at KSFO download without an error key."""
    response = self.client.arrived("KSFO")
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
def test_block_indent_check(self):
    """Blocked/ident check returns a numeric value for a tail number."""
    response = self.client.block_indent_check("N415PW")
    if verbose:
        pprint(response)
    # (int, long): this suite targets Python 2.
    self.assertTrue(isinstance(response, (int, long)))
def test_count_airport_operations(self):
    """Operation counts for KSFO download without an error key."""
    response = self.client.count_airport_operations("KSFO")
    if verbose:
        pprint(response)
    self.assertNotIn("error", response)
    def test_count_all_enroute_airline_operations(self):
        """count_all_enroute_airline_operations should not error."""
        results = self.client.count_all_enroute_airline_operations()
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_decode_flight_route(self):
        """decode_flight_route on a resolved faFlightID should not error."""
        faFlightID = self.client.get_flight_id("N415PW", 1442008560)
        if verbose: pprint(faFlightID)
        results = self.client.decode_flight_route(faFlightID)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_decode_route(self):
        """decode_route for an explicit origin/route/destination triple
        should not error."""
        results = self.client.decode_route("KSQL", "SJC V334 SAC SWR", "KTRK")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_fleet_arrived(self):
        """fleet_arrived for fleet code URF should not error."""
        results = self.client.fleet_arrived("URF")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_fleet_scheduled(self):
        """fleet_scheduled for fleet code URF should not error."""
        results = self.client.fleet_scheduled("URF")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_flight_info(self):
        """flight_info for tail N415PW should not error."""
        results = self.client.flight_info("N415PW")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_flight_info_ex(self):
        """flight_info_ex for tail N415PW should not error."""
        results = self.client.flight_info_ex("N415PW")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_get_alerts(self):
        """get_alerts should not error."""
        results = self.client.get_alerts()
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_get_flight_id(self):
        """get_flight_id accepts both an epoch int and an aware datetime
        for the departure time; neither form should error."""
        results = self.client.get_flight_id("N415PW", 1442008560)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
        results = self.client.get_flight_id("N415PW", datetime.fromtimestamp(1442008560, tz=pytz.utc))
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_get_historical_track(self):
        """get_historical_track on a resolved faFlightID should not error."""
        faFlightID = self.client.get_flight_id("N415PW", 1442008560)
        if verbose: pprint(faFlightID)
        results = self.client.get_historical_track(faFlightID)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_get_last_track(self):
        """get_last_track for tail N415PW should not error."""
        results = self.client.get_last_track("N415PW")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_inbound_flight_info(self):
        """inbound_flight_info on a faFlightID resolved from SWA2612
        should not error."""
        faFlightID = self.client.get_flight_id("SWA2612", 1442035080)
        if verbose: pprint(faFlightID)
        results = self.client.inbound_flight_info(faFlightID)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_in_flight_info(self):
        """in_flight_info for tail N415PW should not error."""
        results = self.client.in_flight_info("N415PW")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_lat_longs_to_distance(self):
        """lat_longs_to_distance between two fixed points returns an int."""
        results = self.client.lat_longs_to_distance(37.3626667, -121.9291111, 33.9425003, -118.4080736)
        if verbose: pprint(results)
        self.assertTrue(isinstance(results, (int, long)))
    def test_lat_longs_to_heading(self):
        """lat_longs_to_heading between two fixed points returns an int."""
        results = self.client.lat_longs_to_heading(37.3626667, -121.9291111, 33.9425003, -118.4080736)
        if verbose: pprint(results)
        self.assertTrue(isinstance(results, (int, long)))
    def test_map_flight(self):
        """map_flight with a 100x100 map should not error."""
        results = self.client.map_flight("N415PW", 100, 100)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_map_flight_ex(self):
        """map_flight_ex with the full positional argument list
        (layers, data blocks, airports, view box) should not error."""
        faFlightID = self.client.get_flight_id("SKW2494", 1442040480)
        if verbose: pprint(faFlightID)
        mapHeight = 100
        mapWidth = 100
        layer_on = ""
        layer_off = ""
        show_data_blocks = "true"
        show_airports = "true"
        airports_expand_view = "true"
        latlon_box = ""
        results = self.client.map_flight_ex(faFlightID, mapHeight, mapWidth, layer_on, layer_off, show_data_blocks, show_airports, airports_expand_view, latlon_box)
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_metar(self):
        """metar for BNA should not error."""
        results = self.client.metar("BNA")
        self.assertNotIn("error", results)
    def test_metar_ex(self):
        """metar_ex for BNA should not error."""
        results = self.client.metar_ex("BNA")
        self.assertNotIn("error", results)
    def test_ntaf(self):
        """ntaf for KSFO should not error."""
        results = self.client.ntaf("KSFO")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_register_alert_endpoint(self):
        '''
        #
        # XXX: Need to figure out how to test this without disrupting the developer's alerts.
        # Ideally, there'd be an API to get the current alert endpoint.
        #
        results = self.client.register_alert_endpoint("http://www.example.com")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
        '''
    def test_routes_between_airports(self):
        """routes_between_airports for KSFO-KLAX should not error."""
        results = self.client.routes_between_airports("KSFO", "KLAX")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_routes_between_airports_ex(self):
        """routes_between_airports_ex for KSFO-KLAX should not error."""
        results = self.client.routes_between_airports_ex("KSFO", "KLAX")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_scheduled(self):
        """scheduled for KSQL should not error."""
        results = self.client.scheduled("KSQL")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_search(self):
        """search with several structured query dicts (max 1 result each)
        should not error for any of them."""
        queries = [
            { "type" : "B77*" },
            { "belowAltitude" : 100, "aboveGroundspeed" : 200 },
            { "destination" : "LAX", "prefix" : "H" },
            { "idents" : "UAL*", "type" : "B73*" },
        ]
        for parameters in queries:
            if verbose: pprint(parameters)
            results = self.client.search(parameters, 1)
            if verbose: pprint(results)
            self.assertNotIn("error", results)
    def test_search_birdseye_in_flight(self):
        """search_birdseye_in_flight over a set of birdseye query strings;
        an 'error' result is tolerated only when it is 'no results'."""
        queries = [
            [ "{< alt 100} {> gs 200}", "All aircraft below ten-thousand feet with a groundspeed over 200 kts" ],
            [ "{match aircraftType B77*}", "All in-air Boeing 777s" ],
            [ "{= dest KLAX} {= prefix H}", "All aircraft heading to Los Angeles International Airport (LAX) that are \"heavy\" aircraft" ],
            [ "{match ident UAL*} {match aircraftType B73*}", "All United Airlines flights in Boeing 737s" ],
            [ "{true lifeguard}", "All \"lifeguard\" rescue flights" ],
            [ "{in orig {KLAX KBUR KSNA KLGB KVNY KSMO KLGB KONT}} {in dest {KJFK KEWR KLGA KTEB KHPN}}", "All flights between Los Angeles area and New York area" ],
            [ "{range lat 36.897669 40.897669} {range lon -79.03655 -75.03655}", "All flights with a last reported position +/- 2 degrees of the Whitehouse" ],
            [ "{> lastPositionTime 1278610758} {true inAir} {!= physClass P} {> circles 3}", "All flights that have a reported position after a specified epoch time, are still in the air, are not piston class, and have made several circular flight patterns (potentially in distress)" ],
        ]
        for (query,comment) in queries:
            if verbose: print "SearchBirdseyeInFlight: ", comment, "(", query, ")"
            results = self.client.search_birdseye_in_flight(query, 1)
            if verbose: pprint(results)
            if u'error' in results and results[u'error'] != u'no results':
                self.assertNotIn("error", results)
    def test_search_birdseye_positions(self):
        """search_birdseye_positions over a set of birdseye query strings;
        an 'error' result is tolerated only when it is 'no results'."""
        queries = [
            [ "{< alt 100} {> gs 200}", "All flight positions below ten-thousand feet with a groundspeed over 200 kts" ],
            [ "{match fp ASA*}", "All Alaska Airlines flight positions" ],
            [ "{match fp ASA*} {> lat 45}", "All Alaska Airlines flight positions north of the 45th parallel" ],
            [ "{range lat 36.897669 40.897669} {range lon -79.03655 -75.03655}", "All flight positions +/- 2 degrees of the lat/lon of the Whitehouse" ],
            [ "{= fp N415PW-1442008613-adhoc-0}", "All flight positions for a specific flight identifier (faFlightID)" ],
        ]
        for (query,comment) in queries:
            if verbose: print "SearchBirdseyePositions: ", comment, "(", query, ")"
            results = self.client.search_birdseye_positions(query, True, 1)
            if verbose: pprint(results)
            if u'error' in results and results[u'error'] != u'no results':
                self.assertNotIn("error", results)
    def test_search_count(self):
        """search_count for several structured queries returns an int."""
        queries = [
            { "type" : "B77*" },
            { "belowAltitude" : 100, "aboveGroundspeed" : 200 },
            { "destination" : "LAX", "prefix" : "H" },
            { "idents" : "UAL*", "type" : "B73*" },
        ]
        for parameters in queries:
            if verbose: pprint(parameters)
            results = self.client.search_count(parameters)
            if verbose: pprint(results)
            self.assertTrue(isinstance(results, (int, long)))
    def test_set_alert(self):
        """
        XXX: Need to implement this unit test.
        """
    def test_set_maximum_result_sizes(self):
        """
        XXX: Need to implement this unit test.
        """
    def test_set_maximum_result_size(self):
        """set_maximum_result_size returns an int acknowledgement."""
        results = self.client.set_maximum_result_size(15)
        if verbose: pprint(results)
        self.assertTrue(isinstance(results, (int, long)))
    def test_taf(self):
        """taf for KSFO should not error."""
        results = self.client.taf("KSFO")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_tail_owner(self):
        """tail_owner for N415PW should not error."""
        results = self.client.tail_owner("N415PW")
        if verbose: pprint(results)
        self.assertNotIn("error", results)
    def test_zipcode_info(self):
        """zipcode_info for 37221 should not error."""
        results = self.client.zipcode_info("37221")
        self.assertNotIn("error", results)
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public interface of this module
# (cap_template_rheader and cap_info_rheader are defined further down)
__all__ = ["S3CAPModel",
           "cap_info_labels",
           "cap_alert_is_template",
           "cap_alert_rheader",
           "cap_template_rheader",
           "cap_info_rheader",
           ]
import time
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ["cap_alert",
"cap_info",
"cap_resource",
"cap_area",
]
    def model(self):
        """
            Define the cap_alert, cap_info, cap_resource and cap_area
            tables, their CRUD strings, search configuration and
            component relationships
        """
        T = current.T
        db = current.db
        settings = current.deployment_settings

        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # List of Incident Categories -- copied from irs module <--
        # @ToDo: Switch to using event_incident_type
        #
        # The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added to 'other'
        # The values are meant for end-users, so can be customised as-required
        # NB It is important that the meaning of these entries is not changed as otherwise this hurts our ability to do synchronisation
        # Entries can be hidden from user view in the controller.
        # Additional sets of 'translations' can be added to the tuples.
        cap_incident_type_opts = {
            "animalHealth.animalDieOff": T("Animal Die Off"),
            "animalHealth.animalFeed": T("Animal Feed"),
            "aviation.aircraftCrash": T("Aircraft Crash"),
            "aviation.aircraftHijacking": T("Aircraft Hijacking"),
            "aviation.airportClosure": T("Airport Closure"),
            "aviation.airspaceClosure": T("Airspace Closure"),
            "aviation.noticeToAirmen": T("Notice to Airmen"),
            "aviation.spaceDebris": T("Space Debris"),
            "civil.demonstrations": T("Demonstrations"),
            "civil.dignitaryVisit": T("Dignitary Visit"),
            "civil.displacedPopulations": T("Displaced Populations"),
            "civil.emergency": T("Civil Emergency"),
            "civil.looting": T("Looting"),
            "civil.publicEvent": T("Public Event"),
            "civil.riot": T("Riot"),
            "civil.volunteerRequest": T("Volunteer Request"),
            "crime": T("Crime"),
            "crime.bomb": T("Bomb"),
            "crime.bombExplosion": T("Bomb Explosion"),
            "crime.bombThreat": T("Bomb Threat"),
            "crime.dangerousPerson": T("Dangerous Person"),
            "crime.drugs": T("Drugs"),
            "crime.homeCrime": T("Home Crime"),
            "crime.illegalImmigrant": T("Illegal Immigrant"),
            "crime.industrialCrime": T("Industrial Crime"),
            "crime.poisoning": T("Poisoning"),
            "crime.retailCrime": T("Retail Crime"),
            "crime.shooting": T("Shooting"),
            "crime.stowaway": T("Stowaway"),
            "crime.terrorism": T("Terrorism"),
            "crime.vehicleCrime": T("Vehicle Crime"),
            "fire": T("Fire"),
            "fire.forestFire": T("Forest Fire"),
            "fire.hotSpot": T("Hot Spot"),
            "fire.industryFire": T("Industry Fire"),
            "fire.smoke": T("Smoke"),
            "fire.urbanFire": T("Urban Fire"),
            "fire.wildFire": T("Wild Fire"),
            "flood": T("Flood"),
            "flood.damOverflow": T("Dam Overflow"),
            "flood.flashFlood": T("Flash Flood"),
            "flood.highWater": T("High Water"),
            "flood.overlandFlowFlood": T("Overland Flow Flood"),
            "flood.tsunami": T("Tsunami"),
            "geophysical.avalanche": T("Avalanche"),
            "geophysical.earthquake": T("Earthquake"),
            "geophysical.lahar": T("Lahar"),
            "geophysical.landslide": T("Landslide"),
            "geophysical.magneticStorm": T("Magnetic Storm"),
            "geophysical.meteorite": T("Meteorite"),
            "geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
            "geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
            "geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
            "geophysical.volcanicEvent": T("Volcanic Event"),
            "hazardousMaterial": T("Hazardous Material"),
            "hazardousMaterial.biologicalHazard": T("Biological Hazard"),
            "hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
            "hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
            "hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
            "hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
            "hazardousMaterial.poisonousGas": T("Poisonous Gas"),
            "hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
            "health.infectiousDisease": T("Infectious Disease"),
            "health.infestation": T("Infestation"),
            "ice.iceberg": T("Iceberg"),
            "ice.icePressure": T("Ice Pressure"),
            "ice.rapidCloseLead": T("Rapid Close Lead"),
            "ice.specialIce": T("Special Ice"),
            "marine.marineSecurity": T("Marine Security"),
            "marine.nauticalAccident": T("Nautical Accident"),
            "marine.nauticalHijacking": T("Nautical Hijacking"),
            "marine.portClosure": T("Port Closure"),
            "marine.specialMarine": T("Special Marine"),
            "meteorological.blizzard": T("Blizzard"),
            "meteorological.blowingSnow": T("Blowing Snow"),
            "meteorological.drought": T("Drought"),
            "meteorological.dustStorm": T("Dust Storm"),
            "meteorological.fog": T("Fog"),
            "meteorological.freezingDrizzle": T("Freezing Drizzle"),
            "meteorological.freezingRain": T("Freezing Rain"),
            "meteorological.freezingSpray": T("Freezing Spray"),
            "meteorological.hail": T("Hail"),
            "meteorological.hurricane": T("Hurricane"),
            "meteorological.rainFall": T("Rain Fall"),
            "meteorological.snowFall": T("Snow Fall"),
            "meteorological.snowSquall": T("Snow Squall"),
            "meteorological.squall": T("Squall"),
            "meteorological.stormSurge": T("Storm Surge"),
            "meteorological.thunderstorm": T("Thunderstorm"),
            "meteorological.tornado": T("Tornado"),
            "meteorological.tropicalStorm": T("Tropical Storm"),
            "meteorological.waterspout": T("Waterspout"),
            "meteorological.winterStorm": T("Winter Storm"),
            "missingPerson": T("Missing Person"),
            "missingPerson.amberAlert": T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
            "missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
            "missingPerson.silver": T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
            "publicService.emergencySupportFacility": T("Emergency Support Facility"),
            "publicService.emergencySupportService": T("Emergency Support Service"),
            "publicService.schoolClosure": T("School Closure"),
            "publicService.schoolLockdown": T("School Lockdown"),
            "publicService.serviceOrFacility": T("Service or Facility"),
            "publicService.transit": T("Transit"),
            "railway.railwayAccident": T("Railway Accident"),
            "railway.railwayHijacking": T("Railway Hijacking"),
            "roadway.bridgeClosure": T("Bridge Closed"),
            "roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
            "roadway.roadwayAccident": T("Road Accident"),
            "roadway.roadwayClosure": T("Road Closed"),
            "roadway.roadwayDelay": T("Road Delay"),
            "roadway.roadwayHijacking": T("Road Hijacking"),
            "roadway.roadwayUsageCondition": T("Road Usage Condition"),
            "roadway.trafficReport": T("Traffic Report"),
            "temperature.arcticOutflow": T("Arctic Outflow"),
            "temperature.coldWave": T("Cold Wave"),
            "temperature.flashFreeze": T("Flash Freeze"),
            "temperature.frost": T("Frost"),
            "temperature.heatAndHumidity": T("Heat and Humidity"),
            "temperature.heatWave": T("Heat Wave"),
            "temperature.windChill": T("Wind Chill"),
            "wind.galeWind": T("Gale Wind"),
            "wind.hurricaneForceWind": T("Hurricane Force Wind"),
            "wind.stormForceWind": T("Storm Force Wind"),
            "wind.strongWind": T("Strong Wind"),
            "other.buildingCollapsed": T("Building Collapsed"),
            "other.peopleTrapped": T("People Trapped"),
            "other.powerFailure": T("Power Failure"),
        }

        # ---------------------------------------------------------------------
        # CAP alerts
        #
        # CAP alert Status Code (status)
        cap_alert_status_code_opts = OrderedDict([
            ("Actual", T("Actual - actionable by all targeted recipients")),
            ("Exercise", T("Exercise - only for designated participants (decribed in note)")),
            ("System", T("System - for internal functions")),
            ("Test", T("Test - testing, all recipients disregard")),
            ("Draft", T("Draft - not actionable in its current form")),
        ])
        # CAP alert message type (msgType)
        cap_alert_msgType_code_opts = OrderedDict([
            ("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
            ("Update", T("Update: Update and supercede earlier message(s)")),
            ("Cancel", T("Cancel: Cancel earlier message(s)")),
            ("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
            ("Error", T("Error: Indicate rejection of the message(s)")),
        ])
        # CAP alert scope
        cap_alert_scope_code_opts = OrderedDict([
            ("Public", T("Public - unrestricted audiences")),
            ("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
            ("Private", T("Private - only to specified addresses (mentioned as recipients)"))
        ])

        tablename = "cap_alert"
        table = define_table(tablename,
                             Field("is_template", "boolean",
                                   readable=False,
                                   writable=True),
                             Field("template_id", "reference cap_alert",
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "cap_alert.id",
                                                          self.template_represent,
                                                          filterby="is_template",
                                                          filter_opts=(True,)
                                                          )),
                                   represent = self.template_represent,
                                   label = T("Template"),
                                   comment = T("Apply a template"),
                                   ondelete = "RESTRICT"),
                             Field("template_title",
                                   label = T("Template Title")),
                             Field("template_settings", "text",
                                   readable=False,
                                   default="{}"),
                             Field("identifier", unique=True, length=128,
                                   label = T("Identifier"),
                                   default = self.generate_identifier),
                             Field("sender",
                                   label = T("Sender"),
                                   default = self.generate_sender),
                             Field("sent", "datetime",
                                   writable=False,
                                   readable=True),
                             Field("status",
                                   label = T("Status"),
                                   requires=IS_IN_SET(cap_alert_status_code_opts)),
                             Field("msg_type",
                                   label = T("Message Type"),
                                   requires=IS_IN_SET(cap_alert_msgType_code_opts)),
                             Field("source",
                                   label = T("Source")),
                             Field("scope",
                                   label = T("Scope"),
                                   requires=IS_IN_SET(cap_alert_scope_code_opts)),
                             # Text decribing the restriction for scope=restricted
                             Field("restriction", "text",
                                   label = T("Restriction")),
                             Field("addresses", "list:string",
                                   label = T("Recipients"),
                                   #@ToDo: provide a better way to add multiple addresses, do not ask the user to delimit it themselves
                                   #       this should eventually use the CAP contacts
                                   #widget = S3CAPAddressesWidget,
                                   represent=self.list_string_represent),
                             Field("codes", "text",
                                   label = T("Codes"),
                                   widget = S3KeyValueWidget(),
                                   represent = S3KeyValueWidget.represent,
                                   default = settings.get_cap_codes()
                                   ),
                             Field("note", "text",
                                   label = T("Note")),
                             Field("reference", "list:reference cap_alert",
                                   label = T("Reference"),
                                   # @ToDo: This should not be manually entered, needs a widget
                                   #widget = S3ReferenceWidget(table, one_to_many=True, allow_create=False),
                                   represent=self.alert_reference_represent),
                             # @ToDo: Switch to using event_incident_type_id
                             Field("incidents",
                                   label = T("Incidents"),
                                   requires=IS_IN_SET(cap_incident_type_opts,
                                                      multiple=True),
                                   represent = self.list_string_represent),
                             *s3_meta_fields())

        # Simple text search over sender, incidents and info sub-fields
        cap_search = S3Search(
                simple = (S3SearchSimpleWidget(
                    name="org_search_text_simple",
                    label = T("Search"),
                    comment = T("Search for an Alert by sender, incident, headline or event."),
                    field = ["sender",
                             "incidents",
                             "cap_info$headline",
                             "cap_info$event"
                             ]
                    )
                ),
            )

        configure(tablename,
                  search_method=cap_search)

        # Templates and alerts share this table, so reuse the template
        # CRUD strings when they have already been set
        if crud_strings["cap_template"]:
            crud_strings[tablename] = crud_strings["cap_template"]
        else:
            ADD_ALERT = T("Create Alert")
            crud_strings[tablename] = Storage(
                title_create = ADD_ALERT,
                title_display = T("Alert Details"),
                title_list = T("Alerts"),
                title_update = T("Edit Alert"), # If already-published, this should create a new "Update" alert instead of modifying the original
                title_upload = T("Import Alerts"),
                title_search = T("Search Alerts"),
                subtitle_create = T("Create new Alert"),
                label_list_button = T("List Alerts"),
                label_create_button = ADD_ALERT,
                label_delete_button = T("Delete Alert"),
                msg_record_created = T("Alert created"),
                msg_record_modified = T("Alert modified"),
                msg_record_deleted = T("Alert deleted"),
                msg_list_empty = T("No alerts to show"))

        alert_id = S3ReusableField("alert_id", table,
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "cap_alert.id",
                                                          self.alert_represent)),
                                   represent = self.alert_represent,
                                   label = T("Alert"),
                                   comment = T("The alert message containing this information"),
                                   ondelete = "RESTRICT")

        # CAP Information segments as components of Alerts
        add_component("cap_info", cap_alert="alert_id")

        # CAP Resources as component of Alerts
        #add_component("cap_resource", cap_alert="alert_id")

        # CAP Areas as component of Alerts
        #add_component("cap_area", cap_alert="alert_id")

        # ---------------------------------------------------------------------
        # CAP info segments
        #
        cap_info_category_opts = OrderedDict([
            ("Geo", T("Geophysical (inc. landslide)")),
            ("Met", T("Meteorological (inc. flood)")),
            ("Safety", T("General emergency and public safety")),
            ("Security", T("Law enforcement, military, homeland and local/private security")),
            ("Rescue", T("Rescue and recovery")),
            ("Fire", T("Fire suppression and rescue")),
            ("Health", T("Medical and public health")),
            ("Env", T("Pollution and other environmental")),
            ("Transport", T("Public and private transportation")),
            ("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
            ("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
            ("Other", T("Other events")),
        ])

        cap_info_responseType_opts = OrderedDict([
            ("Shelter", T("Shelter - Take shelter in place or per instruction")),
            ("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
            ("Prepare", T("Prepare - Make preparations per the instruction")),
            ("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
            ("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
            ("Monitor", T("Monitor - Attend to information sources as described in instruction")),
            ("Assess", T("Assess - Evaluate the information in this message.")),
            ("AllClear", T("AllClear - The subject event no longer poses a threat")),
            ("None", T("None - No action recommended")),
        ])

        cap_info_urgency_opts = OrderedDict([
            ("Immediate", T("Respone action should be taken immediately")),
            ("Expected", T("Response action should be taken soon (within next hour)")),
            ("Future", T("Responsive action should be taken in the near future")),
            ("Past", T("Responsive action is no longer required")),
            ("Unknown", T("Unknown")),
        ])

        cap_info_severity_opts = OrderedDict([
            ("Extreme", T("Extraordinary threat to life or property")),
            ("Severe", T("Significant threat to life or property")),
            ("Moderate", T("Possible threat to life or property")),
            ("Minor", T("Minimal to no known threat to life or property")),
            ("Unknown", T("Severity unknown")),
        ])

        cap_info_certainty_opts = OrderedDict([
            ("Observed", T("Observed: determined to have occurred or to be ongoing")),
            ("Likely", T("Likely (p > ~50%)")),
            ("Possible", T("Possible but not likely (p <= ~50%)")),
            ("Unlikely", T("Not expected to occur (p ~ 0)")),
            ("Unknown", T("Certainty unknown")),
        ])

        # CAP info priority
        # - deployment-configurable; each entry is expected to be a tuple
        #   whose first two items are (key, label)
        priorities = settings.get_cap_priorities()
        try:
            cap_info_priority_opts = OrderedDict([(f[0], f[1]) for f in priorities]
                    + [("Undefined", T("Undefined"))])
        except IndexError:
            raise ValueError("CAP priorities setting is not structured properly")

        # @ToDo: i18n: Need label=T("")
        tablename = "cap_info"
        table = define_table(tablename,
                             alert_id(),
                             Field("is_template", "boolean",
                                   default=True,
                                   readable=False,
                                   writable=False),
                             Field("template_info_id", "reference cap_info",
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "cap_info.id",
                                                          self.template_represent,
                                                          filterby="is_template",
                                                          filter_opts=(True,)
                                                          )),
                                   ondelete = "RESTRICT",
                                   widget = S3HiddenWidget(),
                                   readable=False),
                             Field("template_settings", "text", readable=False),
                             Field("language",
                                   requires=IS_IN_SET(settings.get_cap_languages()),
                                   default="en"),
                             Field("category",
                                   represent=self.list_string_represent,
                                   requires=IS_IN_SET(cap_info_category_opts,
                                                      multiple=True),
                                   required=True),# 1 or more allowed
                             Field("event", required=True),
                             Field("response_type",
                                   #widget = S3MultiSelectWidget(),
                                   requires=IS_IN_SET(cap_info_responseType_opts,
                                                      multiple=True),
                                   represent=self.list_string_represent), # 0 or more allowed
                             Field("priority",
                                   requires=IS_IN_SET(cap_info_priority_opts)),
                             Field("urgency", required=True,
                                   requires=IS_IN_SET(cap_info_urgency_opts)),
                             Field("severity", required=True,
                                   requires=IS_IN_SET(cap_info_severity_opts)),
                             Field("certainty", required=True,
                                   requires=IS_IN_SET(cap_info_certainty_opts)),
                             Field("audience", "text"),
                             Field("event_code", "text",
                                   widget = S3KeyValueWidget(),
                                   represent = S3KeyValueWidget.represent,
                                   default = settings.get_cap_event_codes()
                                   ),
                             Field("effective", "datetime",
                                   # @ToDo: format/represent for l10n options
                                   widget = S3DateTimeWidget()),
                             Field("onset", "datetime",
                                   widget = S3DateTimeWidget()),
                             Field("expires", "datetime",
                                   widget = S3DateTimeWidget(past=0)),
                             Field("sender_name"),
                             Field("headline"),
                             Field("description", "text"),
                             Field("instruction", "text"),
                             Field("contact", "text"),
                             Field("web",
                                   requires=IS_NULL_OR(IS_URL())),
                             Field("parameter", "text",
                                   label = T("Parameters"),
                                   widget = S3KeyValueWidget(),
                                   represent = S3KeyValueWidget.represent,
                                   default = settings.get_cap_parameters()
                                   ),
                             *s3_meta_fields())

        # Apply the i18n labels defined in cap_info_labels()
        info_labels = cap_info_labels()
        for field in info_labels:
            db.cap_info[field].label = info_labels[field]

        if crud_strings["cap_template_info"]:
            crud_strings[tablename] = crud_strings["cap_template_info"]
        else:
            ADD_INFO = T("Add alert information")
            crud_strings[tablename] = Storage(
                title_create = ADD_INFO,
                title_display = T("Alert information"),
                title_list = T("Information entries"),
                title_update = T("Update alert information"), # this will create a new "Update" alert?
                title_upload = T("Import alert information"),
                title_search = T("Search alert information"),
                subtitle_create = T("Create an information entry"),
                subtitle_list = T("Listing of alert information items"),
                label_list_button = T("List information entries"),
                label_create_button = ADD_INFO,
                label_delete_button = T("Delete Alert"),
                msg_record_created = T("Alert information created"),
                msg_record_modified = T("Alert information modified"),
                msg_record_deleted = T("Alert information deleted"),
                msg_list_empty = T("No alert information to show"))

        info_id = S3ReusableField("info_id", table,
                                  sortby="identifier",
                                  requires=IS_NULL_OR(
                                            IS_ONE_OF(db, "cap_info.id",
                                                      self.info_represent)),
                                  represent = self.info_represent,
                                  label = T("Alert Information"),
                                  comment = T("The alert information"),
                                  ondelete = "RESTRICT")

        configure(tablename,
                  onaccept=self.info_onaccept)

        add_component("cap_resource", cap_info="info_id")
        add_component("cap_area", cap_info="info_id")

        # ---------------------------------------------------------------------
        # CAP Resource segments
        #
        tablename = "cap_resource"
        table = define_table(tablename,
                             info_id(),
                             alert_id(writable=False),
                             Field("resource_desc", required=True),
                             Field("mime_type", required=True),
                             Field("size", "integer",
                                   writable = False),
                             Field("uri",
                                   writable = False), # needs a special validation
                             Field("file", "upload"),
                             Field("deref_uri", "text",
                                   writable=False, readable=False),
                             Field("digest",
                                   writable=False),
                             *s3_meta_fields())

        # @ToDo: CRUD Strings
        configure(tablename,
                  onaccept=update_alert_id(table))

        # ---------------------------------------------------------------------
        # CAP info area segments
        #
        tablename = "cap_area"
        table = define_table(tablename,
                             info_id(),
                             alert_id(writable=False),
                             Field("area_desc",
                                   label = T("Area description"),
                                   required=True),
                             self.gis_location_id(
                                widget = S3LocationSelectorWidget(polygon=True)
                             ),
                             Field("circle"),
                             Field("geocode", "text",
                                   widget = S3KeyValueWidget(),
                                   represent = S3KeyValueWidget.represent,
                                   default = settings.get_cap_geocodes),
                             Field("altitude", "integer"),
                             Field("ceiling", "integer"),
                             *s3_meta_fields())

        # @ToDo: CRUD Strings
        configure(tablename,
                  onaccept=update_alert_id(table))

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        return Storage()
# -------------------------------------------------------------------------
    @staticmethod
    def generate_identifier():
        """
            Generate an identifier default for a new alert form:
            "<prefix>-<timestamp>-<sequence>[-<suffix>]", where the
            sequence is the highest existing cap_alert.id + 1 (or 1
            when the table is empty)

            NOTE(review): max(id)+1 is race-prone under concurrent
            inserts -- confirm uniqueness is enforced at the DB level
            (the field is declared unique=True).
        """
        db = current.db
        table = db.cap_alert
        # Fetch the highest existing record id (table may be empty)
        r = db().select(table.id,
                        limitby=(0, 1),
                        orderby=~table.id).first()
        _time = time.strftime("%Y%m%dT%H:%M:%S%z")
        if r:
            next_id = int(r.id) + 1
        else:
            next_id = 1
        # Format: prefix-time+-timezone+sequence-suffix
        settings = current.deployment_settings
        prefix = settings.get_cap_identifier_prefix() \
                    or current.manager.domain
        suffix = settings.get_cap_identifier_suffix()
        # The "-" before the suffix is only emitted when a suffix is set
        return "%s-%s-%d%s%s" % \
                    (prefix, _time, next_id, ["", "-"][bool(suffix)], suffix)
# -------------------------------------------------------------------------
    @staticmethod
    def generate_sender():
        """
            Generate a sender default for a new alert form:
            "<domain>/<user id>", or "" when nobody is logged in
        """
        try:
            user_id = current.auth.user.id
        except AttributeError:
            # current.auth.user is None -> not logged in
            return ""
        return "%s/%d" % (current.manager.domain, user_id)
# -------------------------------------------------------------------------
    @staticmethod
    def alert_represent(id, row=None):
        """
            Represent an alert concisely as "msg_type - sent - sender"

            @param id: the cap_alert record id
            @param row: optional pre-fetched Row (assumed to carry
                        msg_type, sent, created_on and sender --
                        TODO confirm against callers)
        """
        if row:
            pass
        elif not id:
            return current.messages["NONE"]
        else:
            db = current.db
            table = db.cap_alert
            row = db(table.id == id).select(table.msg_type,
                                            table.sent,
                                            table.created_on,
                                            table.sender,
                                            limitby=(0, 1)).first()
        if row:
            # Fall back to the record creation time if not yet sent
            sent = row.sent or row.created_on
            if row.msg_type:
                return "%s - %s - %s" % (row.msg_type, sent, row.sender)
        # Record not found, or msg_type not set
        return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return S3CAPModel.alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
    @staticmethod
    def alert_reference_represent(v):
        """
            Represent a list:reference of cap_alert records as a
            comma-separated list of concise alert representations
        """
        return S3CAPModel.list_string_represent(v, S3CAPModel.alert_represent)
# -------------------------------------------------------------------------
    @staticmethod
    def info_represent(id, row=None):
        """
            Represent an alert information record concisely as
            "language - headline"

            NOTE(review): when the id does not resolve to a record,
            row stays None and the final format line raises
            AttributeError -- confirm callers always pass a valid id.
        """
        if row:
            pass
        elif not id:
            return current.messages["NONE"]
        else:
            db = current.db
            table = db.cap_info
            row = db(table.id == id).select(table.headline,
                                            table.alert_id,
                                            table.language,
                                            limitby=(0, 1)).first()
        # @ToDo: Should get headline from "info"?
        return "%s - %s" % (row.language, row.headline)
# -------------------------------------------------------------------------
    @staticmethod
    def info_onaccept(form):
        """
            After DB I/O

            Propagates the is_template flag from the parent alert to
            the info segment.
        """
        # Extract the record variables from the form
        # NOTE(review): the hasattr() branch appears unreachable for
        # objects where `"vars" in form` and hasattr(form, "vars")
        # coincide (typical web2py FORMs) - confirm
        if "vars" in form:
            vars = form.vars
        elif "id" in form:
            vars = form
        elif hasattr(form, "vars"):
            vars = form.vars
        else:
            vars = form
        info_id = vars.id
        if not info_id:
            return
        db = current.db
        # NOTE(review): atable is unused below
        atable = db.cap_alert
        itable = db.cap_info
        # Look up the alert this info segment belongs to
        info = db(itable.id == info_id).select(itable.alert_id,
                                               limitby=(0, 1)).first()
        if info:
            alert_id = info.alert_id
            if alert_id and cap_alert_is_template(alert_id):
                # NOTE(review): Row.update() only modifies the in-memory
                # Row; if a DB write is intended this should probably be
                # info.update_record(is_template=True) - confirm
                info.update(is_template = True)
        return True
# =============================================================================
def cap_info_labels():
    """
        Labels for CAP info segments
    """
    T = current.T
    labels = {"language": T("Language"),
              "category": T("Category"),
              "event": T("Event"),
              "response_type": T("Response type"),
              "urgency": T("Urgency"),
              "severity": T("Severity"),
              "certainty": T("Certainty"),
              "audience": T("Audience"),
              "event_code": T("Event code"),
              "effective": T("Effective"),
              "onset": T("Onset"),
              "expires": T("Expires at"),
              "sender_name": T("Sender's name"),
              "headline": T("Headline"),
              "description": T("Description"),
              "instruction": T("Instruction"),
              "web": T("URL"),
              "contact": T("Contact information"),
              "parameter": T("Parameters"),
              }
    return labels
# =============================================================================
def cap_alert_is_template(alert_id):
    """
        Tell whether an alert entry is a template
    """
    if not alert_id:
        return False
    table = current.s3db.cap_alert
    record = current.db(table.id == alert_id).select(table.is_template,
                                                     limitby=(0, 1)
                                                     ).first()
    return record and record.is_template
# =============================================================================
def cap_alert_rheader(r):
    """ Resource Header for Alerts """
    if r.representation != "html":
        return None
    record = r.record
    if not record:
        return None
    T = current.T
    itable = current.s3db.cap_info
    info_row = current.db(itable.alert_id == record.id).select(itable.id,
                                                               limitby=(0, 1)
                                                               ).first()
    # Warn if the alert has no info segment yet
    if info_row and info_row.id:
        error = ""
    else:
        error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
                    _class="error")
    tabs = [(T("Alert Qualifiers"), None),
            (T("Information"), "info"),
            #(T("Edit Area"), "area"),
            #(T("Resource Files"), "resource"),
            ]
    rheader_tabs = s3_rheader_tabs(r, tabs)
    return DIV(TABLE(TR(TH("%s: " % T("Alert")),
                        A(S3CAPModel.alert_represent(record.id),
                          _href=URL(c="cap", f="alert",
                                    args=[record.id, "update"]))
                        )
                     ),
               rheader_tabs,
               error
               )
# =============================================================================
def cap_template_rheader(r):
    """ Resource Header for Alert templates"""
    if r.representation != "html":
        return None
    record = r.record
    if not record:
        return None
    T = current.T
    itable = current.s3db.cap_info
    info_row = current.db(itable.alert_id == record.id).select(itable.id,
                                                               limitby=(0, 1)
                                                               ).first()
    # Warn if the template has no info segment yet
    errors = []
    if not (info_row and info_row.id):
        errors.append(DIV(T("An alert needs to contain at least one info item."),
                          _class="error"))
    tabs = [(T("Template"), None),
            (T("Information template"), "info"),
            #(T("Edit Area"), "area"),
            #(T("Resource Files"), "resource"),
            ]
    rheader_tabs = s3_rheader_tabs(r, tabs)
    return DIV(TABLE(TR(TH("%s: " % T("Template")),
                        A(S3CAPModel.template_represent(record.id),
                          _href=URL(c="cap", f="template",
                                    args=[record.id, "update"]))
                        )
                     ),
               rheader_tabs,
               *errors
               )
# =============================================================================
def cap_info_rheader(r):
    """ Resource Header for Info segments """
    if r.representation == "html":
        item = r.record
        if item:
            T = current.T
            tabs = [
                (T("Information"), None),
                (T("Resource Files"), "resource"),
            ]
            if cap_alert_is_template(item.alert_id):
                # Template info segment: link to the parent template
                # and to the info template record
                rheader_tabs = s3_rheader_tabs(r, tabs)
                # NOTE(review): `table` is unused below
                table = r.table
                rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
                                       A(S3CAPModel.template_represent(item.alert_id),
                                         _href=URL(c="cap", f="template",
                                                   args=[item.alert_id, "update"])),
                                       ),
                                    TR(TH("%s: " % T("Info template")),
                                       A(S3CAPModel.info_represent(item.id),
                                         _href=URL(c="cap", f="info",
                                                   args=[item.id, "update"])),
                                       )
                                    ),
                              rheader_tabs,
                              _class="cap_info_template_form"
                              )
                # Provide the i18n "Locked" string for client-side scripts
                current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
            else:
                # Live info segment: additionally offer the Edit Area tab
                tabs.insert(1, (T("Edit Area"), "area"))
                rheader_tabs = s3_rheader_tabs(r, tabs)
                # NOTE(review): `table` is unused below
                table = r.table
                rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
                                       A(S3CAPModel.alert_represent(item.alert_id),
                                         _href=URL(c="cap", f="alert",
                                                   args=[item.alert_id, "update"])),
                                       ),
                                    TR(TH("%s: " % T("Information")),
                                       A(S3CAPModel.info_represent(item.id),
                                         _href=URL(c="cap", f="info",
                                                   args=[item.id, "update"])),
                                       )
                                    ),
                              rheader_tabs
                              )
            return rheader
    return None
# =============================================================================
def update_alert_id(table):
    """
        On-accept for area and resource records: copy the alert_id
        of the parent info segment into the record

        @param table: the table of the component (cap_area/cap_resource)
        @returns: the onaccept callback
    """
    def func(form):
        # Extract the record variables from the form
        if "vars" in form:
            vars = form.vars
        elif "id" in form:
            vars = form
        elif hasattr(form, "vars"):
            vars = form.vars
        else:
            vars = form
        # Get the full record
        id = vars.id
        if not id:
            return
        db = current.db
        item = db(table.id == id).select(table.info_id,
                                         limitby=(0, 1)).first()
        if not item or not item.info_id:
            # Record not found, or not linked to an info segment
            return
        itable = db.cap_info
        info = db(itable.id == item.info_id).select(itable.alert_id,
                                                    limitby=(0, 1)).first()
        if not info:
            # Stale info_id - nothing to propagate
            return
        db(table.id == id).update(alert_id = info.alert_id)
        db.commit()
    return func
# END =========================================================================
| |
import keras
from keras.optimizers import SGD, adadelta, rmsprop, adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.metrics import matthews_correlation, precision, recall
import keras.backend as K
import cPickle
import numpy as np
import getpass
username = getpass.getuser()
from little_foo2 import foo
def sens(y_true, y_pred):
    """
    Sensitivity (recall of the positive class): TP / (TP + FN).

    Operates on Keras backend tensors; predictions are clipped and
    rounded to {0, 1} before counting.
    """
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    tp = K.sum(y_pos * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    # Note: divides by zero if the batch contains no positive samples
    return tp / (tp + fn)
def spec(y_true, y_pred):
    """
    Specificity (recall of the negative class): TN / (TN + FP).

    Operates on Keras backend tensors; predictions are clipped and
    rounded to {0, 1} before counting.
    """
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    # Note: divides by zero if the batch contains no negative samples
    return tn / (fp + tn)
def get_weights(n_dataset):
    """
    Build the model and load the weights saved for dataset `n_dataset`.

    Returns the model with weights loaded.
    """
    # Use the function argument, not the global loop index `i`
    weights = 'best_weights_lilfoo_{0}_{1}.h5'.format(n_dataset, username)
    model = foo()
    model.load_weights(weights)
    print ('weights loaded')
    return model
def get_data(n_dataset):
    """
    Load the pickled train/test split for dataset `n_dataset`.

    Returns ((t_data, t_label), (test_data, test_label)) with the image
    arrays reshaped to (N, 1, 224, 224) and cast to float32.
    """
    # Use open() as a context manager instead of the py2-only file()
    with open('MODS_all_data_bw_224_224_{0}.pkl'.format(n_dataset), 'rb') as f:
        data = cPickle.load(f)
    training_data = data[0]
    validation_data = data[1]
    t_data = np.array(training_data[0])
    t_label = np.array(training_data[1])
    test_data = np.array(validation_data[0])
    test_label = np.array(validation_data[1])
    t_data = t_data.reshape(t_data.shape[0], 1, 224, 224)
    test_data = test_data.reshape(test_data.shape[0], 1, 224, 224)
    #less precision means less memory needed: 64 -> 32 (half the memory used)
    t_data = t_data.astype('float32')
    test_data = test_data.astype('float32')
    return (t_data, t_label), (test_data, test_label)
def test_net(i):
model = get_weights(i)
print 'using weights from net trained on dataset {0}'. format(i)
history = LossAccHistory()
(X_train, y_train), (X_test, y_test) = get_data(i)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_test /= 255
print(X_test.shape[0], 'test samples')
model.compile(loss='binary_crossentropy',
optimizer= rmsprop(lr=0.001), #adadelta
metrics=['accuracy', 'matthews_correlation', 'precision', 'recall', sens, spec])
score = model.evaluate(X_test, Y_test, verbose=1)
print (model.metrics_names, score)
if (len(cvscores[0])==0): #if metric names haven't been saved, do so
cvscores[0].append(model.metrics_names)
else:
counter = 1
for k in score: #for each test metric, append it to the cvscores list
cvscores[counter].append(k)
counter +=1
model.reset_states()
def cv_calc():
    #calculate mean and stdev for each metric, and append them to test_metrics file
    # Uses the module-level state:
    #   cvscores[0]  - a single nested list of metric names
    #   cvscores[1:] - per-dataset values, one sub-list per metric
    test_metrics.append(cvscores[0])
    other_counter = 0
    for metric in cvscores[1:]:
        # cvscores[0][0] is the list of metric names saved by test_net()
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter +=1
        # Wrap the name index after the 7 value metrics
        if other_counter == 7:
            other_counter=0
    return cvscores, test_metrics
def save_metrics(cvscores, test_metrics):
    """
    Write the raw cvscores and the formatted test metrics to a txt file.
    """
    # Use a context manager and avoid shadowing the builtin `file`
    with open('MODS_lilfoo_test_metrics.txt', 'w') as out:
        for j in cvscores:
            out.write('\n%s\n' % j)
        for i in test_metrics:
            out.write('\n%s\n' % i)
    print(test_metrics)
class LossAccHistory(keras.callbacks.Callback):
    """Keras callback recording per-batch loss and accuracy."""
    def on_train_begin(self, logs={}):
        # Reset the history at the start of each training run
        self.losses = []
        self.accu = []
    def on_batch_end(self, batch, logs={}):
        # Keras passes the batch metrics via `logs`
        self.losses.append(logs.get('loss'))
        self.accu.append(logs.get('acc'))
# ----- Experiment configuration -----
nb_classes = 2
nb_epoch = 100
n_dataset = 5
dropout = 0.5
batch_size = 72
optimizer = 'rmsprop'
# Accumulators shared with test_net()/cv_calc()/save_metrics()
test_metrics = []
cvscores = [[],[],[],[],[],[], [], []]
#cvscores = [[metrics],[loss],[acc],[mcc],[precision],[recall], [sens], [spec]]
# Evaluate the trained net on every dataset fold, then aggregate and
# persist the cross-validation metrics.
for i in xrange(n_dataset):
    test_net(i)
cvscores, test_metrics = cv_calc()
print cvscores, test_metrics
save_metrics(cvscores, test_metrics)
| |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
import ip
import json
import logging
import netaddr
import uuid
import warnings
import dictutils as du
import filesystem as filesys
logger = logging.getLogger('osa-inventory')
# Base structure for a freshly generated Ansible inventory
INVENTORY_SKEL = {
    '_meta': {
        'hostvars': {}
    }
}
# This is a list of items that all hosts should have at all times.
# Any new item added to inventory that will used as a default argument in the
# inventory setup should be added to this list.
REQUIRED_HOSTVARS = [
    'properties',
    'ansible_host',
    'physical_host_group',
    'container_address',
    'container_name',
    'container_networks',
    'physical_host',
    'component'
]
class MultipleHostsWithOneIPError(Exception):
    """Raised when two different hosts are assigned the same IP address."""
    def __init__(self, ip, assigned_host, new_host):
        self.ip = ip
        self.assigned_host = assigned_host
        self.new_host = new_host
        # Order the hostnames for predictable testing.
        first, second = sorted([assigned_host, new_host])
        self.message = ("Both host:{} and host:{} have "
                        "address:{} assigned. Cannot "
                        "assign same ip to both hosts").format(first,
                                                               second,
                                                               ip)
    def __str__(self):
        return self.message
class ProviderNetworkMisconfiguration(Exception):
    """Raised when a provider network lacks the required address flags."""
    def __init__(self, queue_name):
        self.queue_name = queue_name
        template = ("Provider network with queue '{queue}' "
                    "requires 'is_container_address' and "
                    "'is_ssh_address' to be set to True.")
        self.message = template.format(queue=queue_name)
    def __str__(self):
        return self.message
class MultipleIpForHostError(Exception):
    """Raised when one host ends up with two different IP addresses."""
    def __init__(self, hostname, current_ip, new_ip):
        self.hostname = hostname
        self.current_ip = current_ip
        self.new_ip = new_ip
        # Sort the IPs for our error message so we're always consistent.
        ordered = sorted([current_ip, new_ip])
        self.message = ("Host {hostname} has both {ips[0]} and {ips[1]} "
                        "assigned".format(hostname=hostname, ips=ordered))
    def __str__(self):
        return self.message
class MissingStaticRouteInfo(Exception):
    """Raised when a static route is missing its cidr or gateway value."""
    def __init__(self, queue_name):
        self.queue_name = queue_name
        template = ("Static route provider network with queue '{queue}' "
                    "needs both 'cidr' and 'gateway' values.")
        self.message = template.format(queue=queue_name)
    def __str__(self):
        return self.message
class LxcHostsDefined(Exception):
    """Raised when the reserved 'lxc_hosts' group appears in user config."""
    def __init__(self):
        self.message = ("The group 'lxc_hosts' must not be defined in config;"
                        " it will be dynamically generated.")
    def __str__(self):
        return self.message
class GroupConflict(Exception):
    # Raised by callers outside this chunk on conflicting group
    # definitions in the user config (presumably - verify against callers)
    pass
def _parse_belongs_to(key, belongs_to, inventory):
    """Parse all items in a `belongs_to` list.

    This function assumes the key defined is a group that has child subgroups,
    *not* a group with hosts defined in the group configuration.

    :param key: ``str`` Name of key to append to a given entry
    :param belongs_to: ``list`` List of items to iterate over
    :param inventory: ``dict`` Living dictionary of inventory
    """
    for parent in belongs_to:
        children = inventory[parent]['children']
        if key in children:
            continue
        if du.append_if(array=children, item=key):
            logger.debug("Added %s to %s", key, parent)
def _build_container_hosts(container_affinity, container_hosts, type_and_name,
                           inventory, host_type, container_type,
                           container_host_type, physical_host_type, config,
                           properties, assignment):
    """Add in all of the host associations into inventory.

    This will add in all of the hosts into the inventory based on the given
    affinity for a container component and its subsequent type groups.

    :param container_affinity: ``int`` Set the number of a given container
    :param container_hosts: ``list`` List of containers on an host
    :param type_and_name: ``str`` Combined name of host and container name
    :param inventory: ``dict`` Living dictionary of inventory
    :param host_type: ``str`` Name of the host type
    :param container_type: ``str`` Type of container
    :param container_host_type: ``str`` Type of host
    :param physical_host_type: ``str`` Name of physical host group
    :param config: ``dict`` User defined information
    :param properties: ``dict`` Container properties
    :param assignment: ``str`` Name of container component target
    """
    container_list = []
    is_metal = False
    if properties:
        is_metal = properties.get('is_metal', False)
    for make_container in range(container_affinity):
        # Count containers of this type that already exist on the host
        for i in container_hosts:
            if '{}-'.format(type_and_name) in i:
                du.append_if(array=container_list, item=i)
        existing_count = len(list(set(container_list)))
        # Only create new entries until the affinity count is reached
        if existing_count < container_affinity:
            hostvars = inventory['_meta']['hostvars']
            container_mapping = inventory[container_type]['children']
            address = None
            if is_metal is False:
                # LXC container: generate a unique name with a short uuid
                cuuid = str(uuid.uuid4())
                cuuid = cuuid.split('-')[0]
                container_host_name = '{}-{}'.format(type_and_name, cuuid)
                logger.debug("Generated container name %s",
                             container_host_name)
                hostvars_options = hostvars[container_host_name] = {}
                if container_host_type not in inventory:
                    inventory[container_host_type] = {
                        "hosts": [],
                    }
                appended = du.append_if(
                    array=inventory[container_host_type]["hosts"],
                    item=container_host_name
                )
                if appended:
                    logger.debug("Added container %s to %s",
                                 container_host_name, container_host_type)
                du.append_if(array=container_hosts, item=container_host_name)
            else:
                # is_metal: the "container" is the physical host itself
                if host_type not in hostvars:
                    hostvars[host_type] = {}
                hostvars_options = hostvars[host_type]
                container_host_name = host_type
                host_type_config = config[physical_host_type][host_type]
                address = host_type_config.get('ip')
            # Create a host types containers group and append it to inventory
            host_type_containers = '{}-host_containers'.format(host_type)
            du.append_if(array=container_mapping, item=host_type_containers)
            # Seed the default hostvars for the new (or metal) entry
            hostvars_options.update({
                'properties': properties,
                'ansible_host': address,
                'container_address': address,
                'container_name': container_host_name,
                'physical_host': host_type,
                'physical_host_group': physical_host_type,
                'component': assignment
            })
def _append_container_types(inventory, host_type):
"""Append the "physical_host" type to all containers.
:param inventory: ``dict`` Living dictionary of inventory
:param host_type: ``str`` Name of the host type
"""
for _host in inventory['_meta']['hostvars'].keys():
hdata = inventory['_meta']['hostvars'][_host]
if 'container_name' in hdata:
if hdata['container_name'].startswith(host_type):
if 'physical_host' not in hdata:
logger.debug("Set physical host for %s to %s",
_host, host_type)
hdata['physical_host'] = host_type
def _append_to_host_groups(inventory, container_type, assignment, host_type,
                           type_and_name, host_options):
    """Append all containers to physical (logical) groups based on host types.

    :param inventory: ``dict`` Living dictionary of inventory
    :param container_type: ``str`` Type of container
    :param assignment: ``str`` Name of container component target
    :param host_type: ``str`` Name of the host type
    :param type_and_name: ``str`` Combined name of host and container name
    :param host_options: ``dict`` Host options from user config; may carry
        ``container_vars`` that are copied onto matching hosts
    """
    # e.g. container_type "compute_containers" -> physical group "compute_all"
    physical_group_type = '{}_all'.format(container_type.split('_')[0])
    if physical_group_type not in inventory:
        logger.debug("Added %s group to inventory", physical_group_type)
        inventory[physical_group_type] = {'hosts': []}
    iph = inventory[physical_group_type]['hosts']
    iah = inventory[assignment]['hosts']
    for hname, hdata in inventory['_meta']['hostvars'].iteritems():
        is_metal = False
        properties = hdata.get('properties')
        if properties:
            is_metal = properties.get('is_metal', False)
        if 'container_types' in hdata or 'container_name' in hdata:
            if 'container_name' not in hdata:
                container = hdata['container_name'] = hname
            else:
                container = hdata['container_name']
            component = hdata.get('component')
            # Ensure the container records its parent physical host
            if container.startswith(host_type):
                if 'physical_host' not in hdata:
                    hdata['physical_host'] = host_type
            # Add the container (or matching metal host) to the component
            # (assignment) group
            if container.startswith('{}-'.format(type_and_name)):
                appended = du.append_if(array=iah, item=container)
                if appended:
                    logger.debug("Added host %s to %s hosts",
                                 container, assignment)
            elif is_metal is True:
                if component == assignment:
                    appended = du.append_if(array=iah, item=container)
                    if appended:
                        logger.debug("Added is_metal host %s to %s hosts",
                                     container, assignment)
            # Add the container (or matching metal host) to the physical
            # "*_all" group
            if container.startswith('{}-'.format(type_and_name)):
                appended = du.append_if(array=iph, item=container)
                if appended:
                    logger.debug("Added host %s to %s hosts",
                                 container, physical_group_type)
            elif is_metal is True:
                if container.startswith(host_type):
                    appended = du.append_if(array=iph, item=container)
                    if appended:
                        logger.debug("Added is_metal host %s to %s hosts",
                                     container, physical_group_type)
            # Append any options in config to the host_vars of a container
            container_vars = host_options.get('container_vars')
            if isinstance(container_vars, dict):
                for _keys, _vars in container_vars.items():
                    # Copy the options dictionary for manipulation
                    if isinstance(_vars, dict):
                        options = _vars.copy()
                    else:
                        options = _vars
                    limit = None
                    # If a limit is set use the limit string as a filter
                    # for the container name and see if it matches.
                    if isinstance(options, (str, dict, list)):
                        if 'limit_container_types' in options:
                            limit = options.pop(
                                'limit_container_types', None
                            )
                    if limit is None or (component and limit in component):
                        logger.debug("Set options for %s", hname)
                        hdata[_keys] = options
def _add_container_hosts(assignment, config, container_name, container_type,
                         inventory, properties):
    """Add a given container name and type to the hosts.

    :param assignment: ``str`` Name of container component target
    :param config: ``dict`` User defined information
    :param container_name: ``str`` Name fo container
    :param container_type: ``str`` Type of container
    :param inventory: ``dict`` Living dictionary of inventory
    :param properties: ``dict`` Dict of container properties
    """
    physical_host_type = '{}_hosts'.format(container_type.split('_')[0])
    # If the physical host type is not in config return
    if physical_host_type not in config:
        return
    for host_type in inventory[physical_host_type]['hosts']:
        container_hosts = inventory[container_name]['hosts']
        # If host_type is not in config do not append containers to it
        if host_type not in config[physical_host_type]:
            continue
        # Get any set host options
        host_options = config[physical_host_type][host_type]
        affinity = host_options.get('affinity', {})
        # Number of containers of this type to build on the host (default 1)
        container_affinity = affinity.get(container_name, 1)
        # Ensures that container names are not longer than 63
        # This section will ensure that we are not it by the following bug:
        # https://bugzilla.mindrot.org/show_bug.cgi?id=2239
        type_and_name = '{}_{}'.format(host_type, container_name)
        logger.debug("Generated container name %s", type_and_name)
        max_hostname_len = 52
        if len(type_and_name) > max_hostname_len:
            raise SystemExit(
                'The resulting combination of [ "{}" + "{}" ] is longer than'
                ' 52 characters. This combination will result in a container'
                ' name that is longer than the maximum allowable hostname of'
                ' 63 characters. Before this process can continue please'
                ' adjust the host entries in your "openstack_user_config.yml"'
                ' to use a short hostname. The recommended hostname length is'
                ' < 20 characters long.'.format(host_type, container_name)
            )
        physical_host = inventory['_meta']['hostvars'][host_type]
        container_host_type = '{}-host_containers'.format(host_type)
        # Record (or correct) the container group name on the physical host
        if 'container_types' not in physical_host:
            physical_host['container_types'] = container_host_type
        elif physical_host['container_types'] != container_host_type:
            physical_host['container_types'] = container_host_type
        # Add all of the containers into the inventory
        logger.debug("Building containers for host %s", container_name)
        _build_container_hosts(
            container_affinity,
            container_hosts,
            type_and_name,
            inventory,
            host_type,
            container_type,
            container_host_type,
            physical_host_type,
            config,
            properties,
            assignment,
        )
        # Add the physical host type to all containers from the built inventory
        _append_container_types(inventory, host_type)
        _append_to_host_groups(
            inventory,
            container_type,
            assignment,
            host_type,
            type_and_name,
            host_options
        )
def user_defined_setup(config, inventory):
    """Apply user defined entries from config into inventory.

    :param config: ``dict`` User defined information
    :param inventory: ``dict`` Living dictionary of inventory
    """
    hvs = inventory['_meta']['hostvars']
    for key, value in config.iteritems():
        # Only "*hosts" keys describe host groups
        if not key.endswith('hosts'):
            continue
        if key not in inventory:
            logger.debug("Key %s was added to inventory", key)
            inventory[key] = {'hosts': []}
        if value is None:
            logger.debug("Key %s had no value", key)
            # BUGFIX: this used to `return`, aborting processing of all
            # remaining host groups as soon as one empty group was found
            continue
        for _key, _value in value.iteritems():
            if _key not in hvs:
                hvs[_key] = {}
            hvs[_key].update({
                'ansible_host': _value['ip'],
                'container_address': _value['ip'],
                'is_metal': True,
                'physical_host_group': key
            })
            logger.debug("Hostvars info updated for %s", key)
            # If the entry is missing the properties key add it.
            properties = hvs[_key].get('properties')
            if not properties or not isinstance(properties, dict):
                hvs[_key]['properties'] = dict()
            hvs[_key]['properties'].update({'is_metal': True})
            # Copy any user defined host_vars onto the host
            if 'host_vars' in _value:
                for _k, _v in _value['host_vars'].items():
                    hvs[_key][_k] = _v
            # Reserve the user defined IP so it's never reassigned
            ip.USED_IPS.add(_value['ip'])
            appended = du.append_if(array=inventory[key]['hosts'],
                                    item=_key)
            if appended:
                logger.debug("Added host %s to group %s",
                             _key, key)
def skel_setup(environment, inventory):
    """Build out the main inventory skeleton as needed.

    :param environment: ``dict`` Known environment information
    :param inventory: ``dict`` Living dictionary of inventory
    """
    for key, value in environment.iteritems():
        if key == 'version':
            continue
        for _key, _value in value.iteritems():
            if _key not in inventory:
                logger.debug("Key %s added to inventory", _key)
                inventory[_key] = {}
            entry = inventory[_key]
            if _key.endswith('container'):
                entry.setdefault('hosts', [])
            else:
                entry.setdefault('children', [])
                # TODO(nrb): This line is duplicated above;
                # is that necessary?
                entry.setdefault('hosts', [])
            # Ensure every group this entry belongs to exists as well
            for assignment in _value.get('belongs_to', []):
                if assignment not in inventory:
                    logger.debug("Created group %s", assignment)
                    inventory[assignment] = {}
                inventory[assignment].setdefault('children', [])
                inventory[assignment].setdefault('hosts', [])
def skel_load(skeleton, inventory):
    """Build out data as provided from the defined `skel` dictionary.

    :param skeleton: ``dict`` Dictionary defining group and component
                     memberships for the inventory.
    :param inventory: ``dict`` Living dictionary of inventory
    """
    for group_name, group_def in skeleton.iteritems():
        _parse_belongs_to(group_name,
                          belongs_to=group_def['belongs_to'],
                          inventory=inventory)
def network_entry(is_metal, interface,
                  bridge=None, net_type=None, net_mtu=None):
    """Return a network entry for a container."""
    # TODO(cloudnull) After a few releases this conditional should be
    # simplified. The container address checking that is ssh address
    # is only being done to support old inventory.
    entry = {} if is_metal else {'interface': interface}
    # Only truthy optional values make it into the entry
    for field, value in (('bridge', bridge),
                         ('type', net_type),
                         ('mtu', net_mtu)):
        if value:
            entry[field] = value
    return entry
def _add_additional_networks(key, inventory, ip_q, q_name, netmask, interface,
                             bridge, net_type, net_mtu, user_config,
                             is_ssh_address, is_container_address,
                             static_routes):
    """Process additional ip adds and append then to hosts as needed.

    If the host is found to be "is_metal" it will be marked as "on_metal"
    and will not have an additionally assigned IP address.

    :param key: ``str`` Component key name. This could be a group or a host
        name
    :param inventory: ``dict`` Living dictionary of inventory.
    :param ip_q: ``object`` build queue of IP addresses.
    :param q_name: ``str`` key to use in host vars for storage. May be blank.
    :param netmask: ``str`` netmask to use.
    :param interface: ``str`` interface name to set for the network.
    :param bridge: ``str`` bridge name for the network entry.
    :param net_type: ``str`` container network type.
    :param net_mtu: MTU value for the network entry.
    :param user_config: ``dict`` user defined configuration details.
    :param is_ssh_address: ``bol`` set this address as ansible_host.
    :param is_container_address: ``bol`` set this address to container_address.
    :param static_routes: ``list`` List containing static route dicts.
    """
    base_hosts = inventory['_meta']['hostvars']
    lookup = inventory.get(key, list())
    # Recurse into child groups first so every member host is processed
    if 'children' in lookup and lookup['children']:
        for group in lookup['children']:
            _add_additional_networks(
                group,
                inventory,
                ip_q,
                q_name,
                netmask,
                interface,
                bridge,
                net_type,
                net_mtu,
                user_config,
                is_ssh_address,
                is_container_address,
                static_routes
            )
    # Make sure the lookup object has a value.
    if lookup:
        hosts = lookup.get('hosts')
        if not hosts:
            return
    else:
        return
    # TODO(cloudnull) after a few releases this should be removed.
    if q_name:
        old_address = '{}_address'.format(q_name)
    else:
        old_address = '{}_address'.format(interface)
    for container_host in hosts:
        container = base_hosts[container_host]
        # TODO(cloudnull) after a few releases this should be removed.
        # This removes the old container network value that now serves purpose.
        container.pop('container_network', None)
        # Reuse an existing container_networks dict if present
        if 'container_networks' in container:
            networks = container['container_networks']
        else:
            networks = container['container_networks'] = dict()
        is_metal = False
        properties = container.get('properties')
        if properties:
            is_metal = properties.get('is_metal', False)
        # This should convert found addresses based on q_name + "_address"
        # and then build the network if its not found.
        if not is_metal and old_address not in networks:
            network = networks[old_address] = network_entry(
                is_metal,
                interface,
                bridge,
                net_type,
                net_mtu
            )
            # Prefer a legacy per-host address; otherwise draw a new
            # address from the queue
            if old_address in container and container[old_address]:
                network['address'] = container.pop(old_address)
            elif not is_metal:
                address = ip.get_ip_address(name=q_name, ip_q=ip_q)
                if address:
                    network['address'] = address
            network['netmask'] = netmask
        elif is_metal:
            network = networks[old_address] = network_entry(
                is_metal,
                interface,
                bridge,
                net_type,
                net_mtu
            )
            network['netmask'] = netmask
            if is_ssh_address or is_container_address:
                # Container physical host group
                cphg = container.get('physical_host_group')
                # user_config data from the container physical host group
                phg = user_config[cphg][container_host]
                network['address'] = phg['ip']
        if is_ssh_address is True:
            container['ansible_host'] = networks[old_address]['address']
        if is_container_address is True:
            container['container_address'] = networks[old_address]['address']
        if static_routes:
            # NOTE: networks[old_address]['static_routes'] will get
            # regenerated on each run
            networks[old_address]['static_routes'] = []
            for route in static_routes:
                # only add static routes if they are specified correctly;
                # that is, the key and a value must be present. This doesn't
                # ensure that the values provided are routable, just that
                # they are not empty.
                cidr_present = route.get('cidr', False)
                gateway_present = route.get('gateway', False)
                if not (cidr_present and gateway_present):
                    raise MissingStaticRouteInfo(q_name)
                networks[old_address]['static_routes'].append(route)
def container_skel_load(container_skel, inventory, config):
    """Build out all containers as defined in the environment file.

    :param container_skel: ``dict`` container skeleton for all known containers
    :param inventory: ``dict`` Living dictionary of inventory
    :param config: ``dict`` User defined information
    """
    logger.debug("Loading container skeleton")
    for key, value in container_skel.iteritems():
        contains_in = value.get('contains', False)
        belongs_to_in = value.get('belongs_to', False)
        # NOTE(review): this tests `or` but then indexes both 'contains'
        # and 'belongs_to' - an entry with only one of the two keys would
        # raise KeyError; confirm whether `and` was intended
        if contains_in or belongs_to_in:
            for assignment in value['contains']:
                for container_type in value['belongs_to']:
                    _add_container_hosts(
                        assignment,
                        config,
                        key,
                        container_type,
                        inventory,
                        value.get('properties', {})
                    )
        else:
            # Provider network/address assignment
            # NOTE(review): this runs once per skel entry without
            # contains/belongs_to - confirm that repetition is intended
            cidr_networks = config.get('cidr_networks')
            provider_queues = {}
            # Build an IP queue plus a netmask entry per CIDR network
            for net_name in cidr_networks:
                ip_q = ip.load_optional_q(
                    cidr_networks, cidr_name=net_name
                )
                provider_queues[net_name] = ip_q
                if ip_q is not None:
                    net = netaddr.IPNetwork(cidr_networks.get(net_name))
                    q_netmask = '{}_netmask'.format(net_name)
                    provider_queues[q_netmask] = str(net.netmask)
            overrides = config['global_overrides']
            # iterate over a list of provider_networks, var=pn
            pns = overrides.get('provider_networks', list())
            for pn in pns:
                # p_net are the provider_network values
                p_net = pn.get('network')
                if not p_net:
                    continue
                q_name = p_net.get('ip_from_q')
                ip_from_q = provider_queues.get(q_name)
                if ip_from_q:
                    netmask = provider_queues['{}_netmask'.format(q_name)]
                else:
                    netmask = None
                # Assign network entries to every bound group
                for group in p_net.get('group_binds', list()):
                    _add_additional_networks(
                        key=group,
                        inventory=inventory,
                        ip_q=ip_from_q,
                        q_name=q_name,
                        netmask=netmask,
                        interface=p_net['container_interface'],
                        bridge=p_net['container_bridge'],
                        net_type=p_net.get('container_type'),
                        net_mtu=p_net.get('container_mtu'),
                        user_config=config,
                        is_ssh_address=p_net.get('is_ssh_address'),
                        is_container_address=p_net.get('is_container_address'),
                        static_routes=p_net.get('static_routes')
                    )
    populate_lxc_hosts(inventory)
def populate_lxc_hosts(inventory):
    """Insert nodes hosting LXC containers into the lxc_hosts group

    The inventory dictionary passed in to this function will be mutated.

    :param inventory: The dictionary containing the Ansible inventory
    """
    inventory['lxc_hosts'] = {'hosts': _find_lxc_hosts(inventory)}
    logger.debug("Created lxc_hosts group.")
def _find_lxc_hosts(inventory):
    """Build the lxc_hosts dynamic group

    Inspect the generated inventory for nodes that host LXC containers.
    Return a list of those that match for insertion into the inventory.
    Populate the 'lxc_hosts' group with any node that matches.

    This and the populate_lxc_hosts function are split in order to be less
    coupled and more testable.

    :param inventory: The dictionary containing the Ansible inventory
    :returns: List of hostnames that are LXC hosts
    :rtype: list
    """
    host_nodes = []
    for host, hostvars in inventory['_meta']['hostvars'].items():
        physical_host = hostvars.get('physical_host', None)
        # BUGFIX: entries without a physical_host would otherwise
        # inject None into the lxc_hosts group
        if physical_host is None:
            continue
        # We want this node's "parent", so append the physical host
        if not host == physical_host:
            appended = du.append_if(array=host_nodes, item=physical_host)
            if appended:
                logger.debug("%s added to lxc_hosts group", physical_host)
    return host_nodes
def _ensure_inventory_uptodate(inventory, container_skel):
    """Update inventory if needed.

    Inspect the current inventory and ensure that all host items have all of
    the required entries.

    :param inventory: ``dict`` Living inventory of containers and hosts
    :param container_skel: ``dict`` Container skeleton whose per-type
        'properties' entries are copied onto the matching hosts
    """
    host_vars = inventory['_meta']['hostvars']
    for hostname, _vars in host_vars.items():
        if 'container_name' not in _vars:
            _vars['container_name'] = hostname
        for rh in REQUIRED_HOSTVARS:
            if rh not in _vars:
                _vars[rh] = None
            # NOTE(review): this resets container_networks to {} even when a
            # value is already present; confirm the unconditional reset is
            # intentional.
            if rh == 'container_networks':
                _vars[rh] = {}
    # For each of the various properties in the container skeleton,
    # copy them into the host's properties dictionary
    for container_type, type_vars in container_skel.items():
        item = inventory.get(container_type)
        # Note: this creates an implicit dependency on skel_setup which
        # adds the hosts entries.
        hosts = item.get('hosts')
        if hosts:
            for host in hosts:
                container = host_vars[host]
                if 'properties' in type_vars:
                    container['properties'] = type_vars['properties']
                    # Log after the copy has happened, naming the host.
                    # (Previously this fired before the copy, read
                    # "propeties", and printed the whole hostvars dict.)
                    logger.debug("Copied properties for %s from skeleton",
                                 host)
def _parse_global_variables(user_cidr, inventory, user_defined_config):
    """Add any extra variables that may have been set in config.

    :param user_cidr: ``str`` IP address range in CIDR notation
    :param inventory: ``dict`` Living inventory of containers and hosts
    :param user_defined_config: ``dict`` User defined variables
    """
    if 'all' not in inventory:
        inventory['all'] = {}
    if 'vars' not in inventory['all']:
        inventory['all']['vars'] = {}
    # Write the users defined cidr into global variables.
    inventory['all']['vars']['container_cidr'] = user_cidr
    if 'global_overrides' in user_defined_config:
        if isinstance(user_defined_config['global_overrides'], dict):
            inventory['all']['vars'].update(
                user_defined_config['global_overrides']
            )
            logger.debug("Applied global_overrides")
            # Materialize with list() so we can append, and so the
            # deletion loop below does not mutate the view it iterates
            # (required on py3, harmless on py2).
            kept_vars = list(user_defined_config['global_overrides'].keys())
            kept_vars.append('container_cidr')
            # Remove global overrides that were deleted from inventory, too
            for key in list(inventory['all']['vars'].keys()):
                if key not in kept_vars:
                    logger.debug("Deleting key %s from inventory", key)
                    del inventory['all']['vars'][key]
def _check_same_ip_to_multiple_host(config):
    """Check for IPs assigned to multiple hosts

    :param config: ``dict`` User provided configuration
    :raises MultipleHostsWithOneIPError: When one IP maps to two hostnames
    """
    ips_to_hostnames_mapping = dict()
    # .items() instead of the py2-only .iteritems(): consistent with
    # _check_multiple_ips_to_host and py2/py3 compatible.
    for key, value in config.items():
        if key.endswith('hosts'):
            for hostname, entry in value.items():
                ip = entry['ip']
                if ip not in ips_to_hostnames_mapping:
                    ips_to_hostnames_mapping[ip] = hostname
                elif ips_to_hostnames_mapping[ip] != hostname:
                    info = (ip, ips_to_hostnames_mapping[ip], hostname)
                    raise MultipleHostsWithOneIPError(*info)
    logger.debug("No hosts with duplicated IPs found")
def _check_multiple_ips_to_host(config):
    """Check for multiple IPs assigned to a single hostname

    :param: config: ``dict`` User provided configuration
    :returns: True when no hostname carries two different IPs
    :raises MultipleIpForHostError: On the first conflicting hostname
    """
    seen = {}
    # Only the *_hosts group dictionaries carry host -> ip mappings.
    for groupname, group in config.items():
        if '_hosts' not in groupname:
            continue
        for hostname, entries in group.items():
            ip = entries['ip']
            known = seen.setdefault(hostname, ip)
            if known != ip:
                raise MultipleIpForHostError(hostname, known, ip)
    logger.debug("No hosts with multiple IPs found.")
    return True
def _check_lxc_hosts(config):
    """Ensure the user configuration does not define an 'lxc_hosts' group.

    The lxc_hosts group is written into the inventory dynamically (see
    populate_lxc_hosts), so a user-defined group of the same name would
    be overwritten.

    :param config: ``dict`` User provided configuration
    :raises LxcHostsDefined: When 'lxc_hosts' appears in the config
    """
    # Membership test directly on the dict; no need to build .keys().
    if 'lxc_hosts' in config:
        raise LxcHostsDefined()
    logger.debug("lxc_hosts group not defined")
def _check_group_branches(config, physical_skel):
    """Ensure that groups have either hosts or child groups, not both

    The inventory skeleton population assumes that groups will either have
    hosts as "leaves", or other groups as children, not both. This function
    ensures this invariant is met by comparing the configuration to the
    physical skeleton definition.

    :param config: ``dict`` The contents of the user configuration file. Keys
        present in this dict are assumed to be groups containing host entries.
    :param physical_skel: ``dict`` The physical skeleton tree, defining
        parent/child relationships between groups. Values in the 'belongs_to'
        key are assumed to be parents of other groups.
    :raises GroupConflict:
    """
    # Use the module-level logger (not the root logger via logging.debug)
    # so these messages honor the module's debug configuration, matching
    # every other check in this file.
    logger.debug("Checking group branches match expectations")
    for group, relations in physical_skel.items():
        if 'belongs_to' not in relations:
            continue
        parents = relations['belongs_to']
        for parent in parents:
            if parent in config:
                message = (
                    "Group {parent} has a child group {child}, "
                    "but also has host entries in user configuration. "
                    "Hosts cannot be sibling with groups."
                ).format(parent=parent, child=group)
                raise GroupConflict(message)
    logger.debug("Group branches ok.")
    return True
def _check_config_settings(cidr_networks, config, container_skel):
    """check preciseness of config settings

    :param cidr_networks: ``dict`` cidr_networks from config
    :param config: ``dict`` User defined information
    :param container_skel: ``dict`` container skeleton for all known containers
    """
    # search for any container that doesn't have is_metal flag set to true
    is_provider_networks_needed = False
    # .items() instead of the py2-only .iteritems() for py2/py3 compatibility.
    for key, value in container_skel.items():
        properties = value.get('properties', {})
        is_metal = properties.get('is_metal', False)
        if not is_metal:
            is_provider_networks_needed = True
            break
    if is_provider_networks_needed:
        if 'global_overrides' not in config:
            raise SystemExit(
                "global_overrides can't be found in user config"
            )
        elif 'provider_networks' not in config['global_overrides']:
            raise SystemExit(
                "provider networks can't be found under "
                "global_overrides in user config"
            )
        else:
            # make sure that provider network's ip_from_q is valid
            overrides = config['global_overrides']
            pns = overrides.get('provider_networks', list())
            for pn in pns:
                p_net = pn.get('network')
                if not p_net:
                    continue
                q_name = p_net.get('ip_from_q')
                if q_name and q_name not in cidr_networks:
                    raise SystemExit(
                        "can't find " + q_name + " in cidr_networks"
                    )
                # The management bridge network must carry both the SSH and
                # the container address.
                if (p_net.get('container_bridge') ==
                        overrides.get('management_bridge')):
                    if (not p_net.get('is_ssh_address') or
                            not p_net.get('is_container_address')):
                        raise ProviderNetworkMisconfiguration(q_name)
            logger.debug("Provider network information OK")
    # look for same ip address assigned to different hosts
    _check_same_ip_to_multiple_host(config)
    _check_multiple_ips_to_host(config)
    _check_lxc_hosts(config)
def _check_all_conf_groups_present(config, environment):
    """Verifies that all groups defined in the config are in the environment

    If a group is in config but not the environment, a warning will be raised.
    Multiple warnings can be raised, and the return value will be set to False.

    If all groups found are in the environment, the function returns True

    :param config: ``dict`` user's provided configuration
    :param environment: ``dict`` group membership mapping
    :rtype: bool, True if all groups are in environment, False otherwise
    """
    excludes = ('global_overrides', 'cidr_networks', 'used_ips')
    env_groups = environment['physical_skel'].keys()
    retval = True
    for group in config.keys():
        if group in excludes:
            continue
        if group in env_groups:
            continue
        warnings.warn("Group {} was found in configuration but "
                      "not the environment.".format(group))
        retval = False
    return retval
def _collect_hostnames(inventory):
    """Return a mapping of every host to its used IP addresses.

    :param inventory: ``dict`` Living inventory of containers and hosts
    :returns: ``dict`` hostname -> {var name -> address} for every hostvar
        whose key ends with 'address' or is 'ansible_host'
    """
    hostnames_ips = {}
    # .items() instead of the py2-only .iteritems() for py2/py3 compatibility.
    for _host, _vars in inventory['_meta']['hostvars'].items():
        host_hash = hostnames_ips[_host] = {}
        for _key, _value in _vars.items():
            if _key.endswith('address') or _key == 'ansible_host':
                host_hash[_key] = _value
    return hostnames_ips
def _prepare_debug_logger():
    """Enable DEBUG output for this module, written to 'inventory.log'."""
    log_fmt = "%(lineno)d - %(funcName)s: %(message)s"
    logging.basicConfig(format=log_fmt, filename='inventory.log')
    logger.setLevel(logging.DEBUG)
    logger.info("Beginning new inventory run")
def main(config=None, check=False, debug=False, environment=None, **kwargs):
    """Run the main application.

    Loads the user configuration and environment skeletons, builds or
    updates the dynamic inventory, and (unless running in check mode)
    writes the inventory and hostname/IP files back to disk.

    :param config: ``str`` Directory from which to pull configs and overrides
    :param check: ``bool`` Flag to enable check mode
    :param debug: ``bool`` Flag to enable debug logging
    :param kwargs: ``dict`` Dictionary of arbitrary arguments; mostly for
        catching Ansible's required `--list` parameter without name shadowing
        the `list` built-in.
    :param environment: ``str`` Directory containing the base env.d
    :returns: ``str`` The JSON-serialized inventory, or 'Configuration ok!'
        in check mode when all configured groups exist in the environment
    """
    if debug:
        _prepare_debug_logger()
    try:
        user_defined_config = filesys.load_user_configuration(config)
    except filesys.MissingDataSource as ex:
        raise SystemExit(ex)
    # The base environment is layered first, then the config directory's
    # environment is loaded on top of it.
    base_env_dir = environment
    base_env = filesys.load_environment(base_env_dir, {})
    environment = filesys.load_environment(config, base_env)
    # Load existing inventory file if found
    inventory, inv_path = filesys.load_inventory(config, INVENTORY_SKEL)
    # Save the users container cidr as a group variable
    cidr_networks = user_defined_config.get('cidr_networks')
    if not cidr_networks:
        raise SystemExit('No container CIDR specified in user config')
    # 'container' wins over 'management' when both networks are defined.
    if 'container' in cidr_networks:
        user_cidr = cidr_networks['container']
    elif 'management' in cidr_networks:
        user_cidr = cidr_networks['management']
    else:
        raise SystemExit('No container or management network '
                         'specified in user config.')
    # make sure user_defined config is self contained
    _check_config_settings(
        cidr_networks,
        user_defined_config,
        environment.get('container_skel')
    )
    # Add the container_cidr into the all global ansible group_vars
    _parse_global_variables(user_cidr, inventory, user_defined_config)
    # Load all of the IP addresses that we know are used and set the queue
    ip.set_used_ips(user_defined_config, inventory)
    user_defined_setup(user_defined_config, inventory)
    skel_setup(environment, inventory)
    _check_group_branches(
        user_defined_config,
        environment.get('physical_skel')
    )
    logger.debug("Loading physical skel.")
    skel_load(
        environment.get('physical_skel'),
        inventory
    )
    logger.debug("Loading component skel")
    skel_load(
        environment.get('component_skel'),
        inventory
    )
    container_skel_load(
        environment.get('container_skel'),
        inventory,
        user_defined_config
    )
    # Look at inventory and ensure all entries have all required values.
    _ensure_inventory_uptodate(
        inventory=inventory,
        container_skel=environment.get('container_skel'),
    )
    # Load the inventory json
    inventory_json = json.dumps(
        inventory,
        indent=4,
        separators=(',', ': '),
        sort_keys=True
    )
    # Check mode returns before any file is written.
    if check:
        if _check_all_conf_groups_present(user_defined_config, environment):
            return 'Configuration ok!'
    # Save a list of all hosts and their given IP addresses
    hostnames_ips = _collect_hostnames(inventory)
    filesys.write_hostnames(config, hostnames_ips)
    if logger.isEnabledFor(logging.DEBUG):
        num_hosts = len(inventory['_meta']['hostvars'])
        logger.debug("%d hosts found.", num_hosts)
    # Save new dynamic inventory
    filesys.save_inventory(inventory_json, inv_path)
    return inventory_json
| |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from chainladder.utils.weighted_regression import WeightedRegression
from chainladder.development import Development, DevelopmentBase
import numpy as np
import pandas as pd
import warnings
class MunichAdjustment(DevelopmentBase):
    """Applies the Munich Chainladder adjustment to a set of paid/incurred
    ldfs. The Munich method heavily relies on the ratio of paid/incurred
    and its inverse.

    Parameters
    ----------
    paid_to_incurred : tuple or list of tuples
        A tuple representing the paid and incurred ``columns`` of the triangles
        such as ``('paid', 'incurred')``
    fillna : boolean
        The MunichAdjustment will fail when P/I or I/P ratios cannot be calculated.
        Setting fillna to True will fill the triangle with expected amounts using
        the simple chainladder.

    Attributes
    ----------
    basic_cdf_ : Triangle
        The univariate cumulative development patterns
    basic_sigma_ : Triangle
        Sigma of the univariate ldf regression
    resids_ : Triangle
        Residuals of the univariate ldf regression
    q_ : Triangle
        chainladder age-to-age factors of the paid/incurred triangle and its
        inverse. For paid measures it is (P/I) and for incurred measures it is
        (I/P).
    q_resids_ : Triangle
        Residuals of q regression.
    rho_ : Triangle
        Estimated conditional deviation around ``q_``
    lambda_ : Series or DataFrame
        Dependency coefficient between univariate chainladder link ratios and
        ``q_resids_``
    ldf_ : Triangle
        The estimated bivariate loss development patterns
    cdf_ : Triangle
        The estimated bivariate cumulative development patterns
    """

    def __init__(self, paid_to_incurred=None, fillna=False):
        if type(paid_to_incurred) is dict:
            warnings.warn(
                "paid_to_incurred dict argument is deprecated, use tuple instead"
            )
            paid_to_incurred = [(k, v) for k, v in paid_to_incurred.items()]
        self.paid_to_incurred = paid_to_incurred
        self.fillna = fillna

    def fit(self, X, y=None, sample_weight=None):
        """Fit the model with X.

        Parameters
        ----------
        X : Triangle-like
            Set of LDFs to which the munich adjustment will be applied.
        y : Ignored
        sample_weight : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        from chainladder import options

        if self.paid_to_incurred is None:
            raise ValueError("Must enter valid value for paid_to_incurred.")
        if X.array_backend == "sparse":
            obj = X.set_backend("numpy")
        else:
            obj = X.copy()
        xp = obj.get_array_module()
        self.xp = xp
        # Cells inside the triangle whose value is zero/NaN make the P/I
        # and I/P ratios undefined.
        missing = xp.nan_to_num(obj.values) * obj.nan_triangle == 0
        self.rho_ = obj[obj.origin == obj.origin.min()]
        if len(xp.where(missing)[0]) > 0:
            if self.fillna:
                from chainladder.methods import Chainladder

                filler = Chainladder().fit(obj).full_expectation_
                filler = filler[filler.valuation <= obj.valuation_date].values
                obj.values = xp.where(missing, filler, obj.values)
            else:
                raise ValueError(
                    "MunichAdjustment cannot be performed when P/I or I/P "
                    + "ratios cannot be computed. Use `fillna=True` to impute zero"
                    + " values of the triangle with simple chainladder expectation."
                )
        if "ldf_" not in obj:
            obj = Development().fit_transform(obj)
        self.p_to_i_X_ = self._get_p_to_i_object(obj)
        self.p_to_i_ldf_ = self._get_p_to_i_object(obj.ldf_)
        self.p_to_i_sigma_ = self._get_p_to_i_object(obj.sigma_)
        self.q_f_, self.rho_sigma_ = self._get_MCL_model(obj)
        self.residual_, self.q_resid_ = self._get_MCL_resids(obj)
        self.lambda_coef_ = self._get_MCL_lambda(obj)
        self.ldf_ = self._set_ldf(
            obj, self._get_mcl_cdf(obj, self.munich_full_triangle_)
        )
        self.ldf_.is_cumulative = False
        self.ldf_.valuation_date = pd.to_datetime(options.ULT_VAL)
        # Map each column index of X to (paid/incurred leg, pair number).
        self._map = {
            (list(X.columns).index(x)): (num % 2, num // 2)
            for num, x in enumerate(np.array(self.paid_to_incurred).flatten())
        }
        self.rho_.values = self._reshape("rho_sigma_")
        return self

    def transform(self, X):
        """ If X and self are of different shapes, align self to X, else
        return self.

        Parameters
        ----------
        X : Triangle
            The triangle to be transformed

        Returns
        -------
        X_new : New triangle with transformed attributes.
        """
        backend = X.array_backend
        if backend == "sparse":
            X_new = X.set_backend("numpy")
        else:
            X_new = X.copy()
        xp = X_new.get_array_module()
        self.xp = xp
        if "ldf_" not in X_new:
            X_new = Development().fit_transform(X_new)
        self.xp = X_new.get_array_module()
        X_new.p_to_i_X_ = self._get_p_to_i_object(X_new)
        X_new.p_to_i_ldf_ = self._get_p_to_i_object(X_new.ldf_)
        X_new.p_to_i_sigma_ = self._get_p_to_i_object(X_new.sigma_)
        X_new.q_f_, X_new.rho_sigma_ = self._get_MCL_model(X_new)
        X_new.munich_full_triangle_ = self._get_munich_full_triangle_(
            X_new.p_to_i_X_,
            X_new.p_to_i_ldf_,
            X_new.p_to_i_sigma_,
            self.lambda_coef_,
            X_new.rho_sigma_,
            X_new.q_f_,
        )
        X_new.ldf_ = self._set_ldf(
            X_new, self._get_mcl_cdf(X_new, X_new.munich_full_triangle_)
        )
        del self.xp
        triangles = ["rho_", "lambda_", "lambda_coef_"]
        for item in triangles:
            setattr(X_new, item, getattr(self, item))
        X_new._set_slicers()
        X_new.sigma_ = X_new.std_err_ = X_new.ldf_ * 0 + 1
        return X_new

    def _get_p_to_i_object(self, obj):
        # Stack the paid columns and the incurred columns of `obj` into a
        # single array with a leading axis of size 2: [paid, incurred].
        if type(self.paid_to_incurred) is tuple:
            p_to_i = [self.paid_to_incurred]
        else:
            p_to_i = self.paid_to_incurred
        xp = obj.get_array_module()
        paid = obj[[item[0] for item in p_to_i][0]]
        for item in [item[0] for item in p_to_i][1:]:
            paid[item] = obj[item]
        incurred = obj[[item[1] for item in p_to_i][0]]
        for item in [item[1] for item in p_to_i][1:]:
            incurred[item] = obj[item]
        paid = paid.values[None]
        incurred = incurred.values[None]
        return xp.concatenate((paid, incurred), axis=0)

    def _p_to_i_concate(self, obj_p, obj_i, xp):
        # Concatenate a paid and an incurred array along a new leading axis.
        return xp.concatenate((obj_p[None], obj_i[None]), 0)

    def _get_MCL_model(self, X):
        xp = X.get_array_module()
        p, i = self.p_to_i_X_[0], self.p_to_i_X_[1]
        # Regress incurred on paid (and vice versa) through the origin to
        # obtain the q factors and their sigmas.
        modelsP = WeightedRegression(axis=2, thru_orig=True, xp=xp)
        modelsP = modelsP.fit(p, i, 1 / p).sigma_fill(X.sigma_interpolation)
        modelsI = WeightedRegression(axis=2, thru_orig=True, xp=xp)
        modelsI = modelsI.fit(i, p, 1 / i).sigma_fill(X.sigma_interpolation)
        q_f = self._p_to_i_concate(modelsP.slope_, modelsI.slope_, xp)
        rho_sigma = self._p_to_i_concate(modelsP.sigma_, modelsI.sigma_, xp)
        return xp.swapaxes(q_f, -1, -2), xp.swapaxes(rho_sigma, -1, -2)

    def _get_MCL_resids(self, X):
        xp = X.get_array_module()
        p_to_i_ata = self._get_p_to_i_object(X.link_ratio)
        p_to_i_ldf = self.p_to_i_ldf_
        p_to_i_sigma = self.p_to_i_sigma_
        paid, incurred = self.p_to_i_X_[0], self.p_to_i_X_[1]
        p_to_i_ldf = xp.unique(p_to_i_ldf, axis=-2)  # May cause issues later
        p_to_i_sigma = xp.unique(p_to_i_sigma, axis=-2)  # May cause issues
        residP = (
            (p_to_i_ata[0] - p_to_i_ldf[0])
            / p_to_i_sigma[0]
            * xp.sqrt(paid[..., :-1, :-1])
        )
        residI = (
            (p_to_i_ata[1] - p_to_i_ldf[1])
            / p_to_i_sigma[1]
            * xp.sqrt(incurred[..., :-1, :-1])
        )
        nans = (X - X[X.valuation == X.valuation_date]).values[0, 0] * 0 + 1
        q_resid = (
            (paid / incurred - self.q_f_[1])
            / self.rho_sigma_[1]
            * xp.sqrt(incurred)
            * nans
        )
        q_inv_resid = (
            (incurred / paid - 1 / self.q_f_[1])
            / self.rho_sigma_[0]
            * xp.sqrt(paid)
            * nans
        )
        resid = self._p_to_i_concate(residP, residI, xp)
        q_resid = self._p_to_i_concate(q_inv_resid, q_resid, xp)
        return resid, q_resid

    def _get_MCL_lambda(self, obj):
        xp = obj.get_array_module()
        k, v, o, d = self.residual_[1].shape
        w = xp.reshape(self.residual_[1], (k, v, o * d))
        w[w == 0] = xp.nan
        w = w * 0 + 1
        lambdaI = (
            WeightedRegression(thru_orig=True, axis=-1, xp=xp)
            .fit(
                xp.reshape(self.q_resid_[1][..., :-1, :-1], (k, v, o * d)),
                xp.reshape(self.residual_[1], (k, v, o * d)),
                w,
            )
            .slope_
        )
        lambdaP = (
            WeightedRegression(thru_orig=True, axis=-1, xp=xp)
            .fit(
                xp.reshape(self.q_resid_[0][..., :-1, :-1], (k, v, o * d)),
                xp.reshape(self.residual_[0], (k, v, o * d)),
                w,
            )
            .slope_
        )
        return self._p_to_i_concate(lambdaP, lambdaI, xp)[..., None]

    @property
    def munich_full_triangle_(self):
        return self._get_munich_full_triangle_(
            self.p_to_i_X_,
            self.p_to_i_ldf_,
            self.p_to_i_sigma_,
            self.lambda_coef_,
            self.rho_sigma_,
            self.q_f_,
        )

    def _get_munich_full_triangle_(
        self, p_to_i_X_, p_to_i_ldf_, p_to_i_sigma_, lambda_coef_, rho_sigma_, q_f_
    ):
        xp = self.xp if hasattr(self, "xp") else self.ldf_.get_array_module()
        full_paid = xp.nan_to_num(p_to_i_X_[0][..., 0:1])
        full_incurred = p_to_i_X_[1][..., 0:1]
        for i in range(p_to_i_X_[0].shape[-1] - 1):
            paid = (
                p_to_i_ldf_[0][..., i : i + 1]
                + lambda_coef_[0]
                * p_to_i_sigma_[0][..., i : i + 1]
                / rho_sigma_[0][..., i : i + 1]
                * (
                    full_incurred[..., -1:] / full_paid[..., -1:]
                    - q_f_[0][..., i : i + 1]
                )
            ) * full_paid[..., -1:]
            inc = (
                p_to_i_ldf_[1][..., i : i + 1]
                # Use the passed-in coefficient (was self.lambda_coef_[1]),
                # consistent with the paid leg above.
                + lambda_coef_[1]
                * p_to_i_sigma_[1][..., i : i + 1]
                / rho_sigma_[1][..., i : i + 1]
                * (
                    full_paid[..., -1:] / full_incurred[..., -1:]
                    - q_f_[1][..., i : i + 1]
                )
            ) * full_incurred[..., -1:]
            # Keep observed cells; fill only the unobserved ones with the
            # Munich projections.
            full_incurred = xp.concatenate(
                (
                    full_incurred,
                    xp.nan_to_num(p_to_i_X_[1][..., i + 1 : i + 2])
                    + (1 - xp.nan_to_num(p_to_i_X_[1][..., i + 1 : i + 2] * 0 + 1))
                    * inc,
                ),
                axis=3,
            )
            full_paid = xp.concatenate(
                (
                    full_paid,
                    xp.nan_to_num(p_to_i_X_[0][..., i + 1 : i + 2])
                    + (1 - xp.nan_to_num(p_to_i_X_[0][..., i + 1 : i + 2] * 0 + 1))
                    * paid,
                ),
                axis=3,
            )
        return self._p_to_i_concate(full_paid, full_incurred, xp)

    def _get_mcl_cdf(self, X, munich_full_triangle_):
        """ needs to be an attribute that gets assigned. requires we overwrite
        the cdf and ldf methods with
        """
        xp = X.get_array_module()
        obj = X.cdf_.copy()
        obj.values = xp.repeat(obj.values, len(X.odims), 2)
        obj.odims = X.odims
        if type(self.paid_to_incurred) is tuple:
            p_to_i = [self.paid_to_incurred]
        else:
            p_to_i = self.paid_to_incurred
        cdf_triangle = munich_full_triangle_
        cdf_triangle = cdf_triangle[..., -1:] / cdf_triangle[..., :-1]
        paid = [item[0] for item in p_to_i]
        for n, item in enumerate(paid):
            idx = np.where(X.cdf_.vdims == item)[0][0]
            obj.values[:, idx : idx + 1, ...] = cdf_triangle[0, :, n : n + 1, ...]
        incurred = [item[1] for item in p_to_i]
        for n, item in enumerate(incurred):
            idx = np.where(X.cdf_.vdims == item)[0][0]
            obj.values[:, idx : idx + 1, ...] = cdf_triangle[1, :, n : n + 1, ...]
        obj._set_slicers()
        return obj

    def _set_ldf(self, X, cdf):
        ldf_tri = cdf.values.copy()
        xp = X.get_array_module()
        ldf_tri = xp.concatenate((ldf_tri, xp.ones(ldf_tri.shape)[..., -1:]), -1)
        ldf_tri = ldf_tri[..., :-1] / ldf_tri[..., 1:]
        obj = cdf.copy()
        obj.values = ldf_tri
        obj.ddims = X.link_ratio.ddims
        obj.is_pattern = True
        # Actually call _set_slicers (the original bare attribute access
        # was a no-op); matches the calls in _get_mcl_cdf and transform.
        obj._set_slicers()
        return obj

    def _reshape(self, measure):
        xp = self.xp if hasattr(self, "xp") else self.ldf_.get_array_module()
        # 'mapping' rather than 'map' to avoid shadowing the builtin.
        mapping = self._map
        return xp.concatenate(
            [
                getattr(self, measure)[
                    mapping[k][0], :, mapping[k][1] : mapping[k][1] + 1, ...
                ]
                for k in range(len(mapping))
            ],
            axis=1,
        )

    @property
    def lambda_(self):
        obj = self.ldf_.copy()
        obj.odims = obj.odims[0:1]
        obj.ddims = obj.ddims[0:1]
        obj.values = self._reshape("lambda_coef_")
        return obj.to_frame()

    @property
    def basic_cdf_(self):
        obj = self.ldf_.copy()
        obj.values = self._reshape("p_to_i_ldf_")
        return obj

    @property
    def basic_sigma_(self):
        obj = self.ldf_.copy()
        obj.values = self._reshape("p_to_i_sigma_")
        return obj

    @property
    def resids_(self):
        obj = self.ldf_.copy()
        obj.values = self._reshape("residual_")
        obj.odims = self.cdf_.odims[: obj.values.shape[2]]
        return obj

    @property
    def q_(self):
        obj = self.rho_.copy()
        obj.odims = self.cdf_.odims
        obj.values = self._reshape("q_f_")
        return obj

    @property
    def q_resids_(self):
        obj = self.ldf_.copy()
        obj.values = self._reshape("q_resid_")[
            ..., : self.residual_.shape[-2], : self.residual_.shape[-1]
        ]
        obj.odims = obj.odims[: obj.values.shape[2]]
        obj.ddims = obj.ddims[: obj.values.shape[3]]
        return obj
| |
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
# Names exported by `from tempfile import *`.
__all__ = [
    "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
    "SpooledTemporaryFile",
    "mkstemp", "mkdtemp",                  # low level safe interfaces
    "mktemp",                              # deprecated unsafe interface
    "TMP_MAX", "gettempprefix",            # constants
    "tempdir", "gettempdir"
   ]
# Imports.
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
try:
    import fcntl as _fcntl
except ImportError:
    # No fcntl on this platform (e.g. Windows): close-on-exec cannot be
    # set via F_SETFD, so fall back to a no-op.
    def _set_cloexec(fd):
        pass
else:
    def _set_cloexec(fd):
        # Mark fd close-on-exec so it is not inherited across exec().
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except IOError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
# O_EXCL makes creation atomic: open fails with EEXIST if the name is
# already taken.  O_NOINHERIT / O_NOFOLLOW are OR'd in only where the
# platform defines them.
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    _text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    _bin_openflags |= _os.O_BINARY
# Cap on how many candidate names the mk*temp functions will try.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000
# Default prefix for all generated temporary names.
template = "tmp"
# Internal routines.
# Guards one-time lazy initialization of module state
# (tempdir, _name_sequence).
_once_lock = _allocate_lock()
# Prefer lstat so a symlink itself is detected rather than followed.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises os.error if the
    # file doesn't exist.
    def _stat(fn):
        try:
            f = open(fn)
        except IOError:
            raise _os.error
        f.close()
def _exists(fn):
    """Return True if fn names an existing file, else False (via _stat)."""
    try:
        _stat(fn)
    except _os.error:
        return False
    return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = ("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"0123456789_")
def __init__(self):
self.mutex = _allocate_lock()
self.normcase = _os.path.normcase
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def next(self):
m = self.mutex
c = self.characters
choose = self.rng.choice
m.acquire()
try:
letters = [choose(c) for dummy in "123456"]
finally:
m.release()
return self.normcase(''.join(letters))
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try."""
    dirlist = []
    # Environment variables take priority, in this fixed order.
    for envname in ('TMPDIR', 'TEMP', 'TMP'):
        dirname = _os.getenv(envname)
        if dirname:
            dirlist.append(dirname)
    # Then OS-specific well-known locations.
    if _os.name == 'riscos':
        dirname = _os.getenv('Wimp$ScrapDir')
        if dirname:
            dirlist.append(dirname)
    elif _os.name == 'nt':
        dirlist.extend([r'c:\temp', r'c:\tmp', r'\temp', r'\tmp'])
    else:
        dirlist.extend(['/tmp', '/var/tmp', '/usr/tmp'])
    # As a last resort, the current directory.
    try:
        dirlist.append(_os.getcwd())
    except (AttributeError, _os.error):
        dirlist.append(_os.curdir)
    return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.
    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    flags = _text_openflags
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in xrange(100):
            name = namer.next()
            filename = _os.path.join(dir, name)
            try:
                # O_EXCL in flags means an existing file raises EEXIST.
                fd = _os.open(filename, flags, 0o600)
                try:
                    try:
                        # closefd=False: _os.close below owns the fd.
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    # Always remove the probe file, even on write failure.
                    _os.unlink(filename)
                return dir
            except (OSError, IOError) as e:
                if e.args[0] == _errno.EEXIST:
                    continue
                if (_os.name == 'nt' and e.args[0] == _errno.EACCES and
                    _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
                    # On windows, when a directory with the chosen name already
                    # exists, EACCES error code is returned instead of EEXIST.
                    continue
                break   # no point trying more names in this directory
    raise IOError, (_errno.ENOENT,
                    ("No usable temporary directory found in %s" % dirlist))
_name_sequence = None

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    global _name_sequence
    # Double-checked locking: only the very first caller pays for the lock.
    if _name_sequence is None:
        with _once_lock:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Tries up to TMP_MAX randomized names; returns (fd, absolute path)."""
    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, pre + name + suf)
        try:
            # 0600: owner read/write only.  O_EXCL in `flags` makes the
            # creation atomic, so EEXIST means "name taken, retry".
            fd = _os.open(file, flags, 0600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            if (_os.name == 'nt' and e.errno == _errno.EACCES and
                _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
                # On windows, when a directory with the chosen name already
                # exists, EACCES error code is returned instead of EEXIST.
                continue
            raise
    raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempfile.template, the default name prefix."""
    return template
tempdir = None

def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    # Lazily computed once, guarded by the module's one-time lock.
    if tempdir is None:
        with _once_lock:
            if tempdir is None:
                tempdir = _get_default_tempdir()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    'suffix' and 'prefix', when given, bound the generated file name;
    when omitted there is no suffix and a default prefix is used.
    'dir' overrides the default temporary directory.

    When 'text' is true the file is opened in text mode; otherwise
    (the default) in binary mode.  On some operating systems this
    makes no difference.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one.  The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """
    if dir is None:
        dir = gettempdir()
    flags = _text_openflags if text else _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """User-callable function to create and return a unique temporary
    directory.  The return value is the pathname of the directory.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    The directory is readable, writable, and searchable only by the
    creating user.
    Caller is responsible for deleting the directory when done with it.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # 0700: accessible only by the creating user; mkdir raises
            # EEXIST if another process grabbed the name first.
            _os.mkdir(file, 0700)
            return file
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            if (_os.name == 'nt' and e.errno == _errno.EACCES and
                _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
                # On windows, when a directory with the chosen name already
                # exists, EACCES error code is returned instead of EEXIST.
                continue
            raise
    raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name.  The
    file is not created.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    This function is unsafe and should not be used.  The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """
##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        # Race window: the file may be created by someone else between
        # this check and the caller's use of the name.
        if not _exists(file):
            return file
    raise IOError, (_errno.EEXIST, "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        # file: the open file object being wrapped
        # name: its pathname, exposed as .name to callers
        # delete: whether close() should also unlink the file (POSIX path)
        self.file = file
        self.name = name
        self.close_called = False
        self.delete = delete
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not issubclass(type(a), type(0)):
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink
        def close(self):
            # Idempotent: safe to call multiple times and from __del__.
            if not self.close_called:
                self.close_called = True
                try:
                    self.file.close()
                finally:
                    # Unlink even if close() raised, so the temp file
                    # never outlives the wrapper.
                    if self.delete:
                        self.unlink(self.name)
        def __del__(self):
            self.close()
        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        # On NT the OS deletes the file at close (O_TEMPORARY), so no
        # close/__del__ overrides are needed here.
        def __exit__(self, exc, value, tb):
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
                       prefix=template, dir=None, delete=True):
    """Create and return a temporary file.

    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to os.fdopen (default "w+b").
    'bufsize' -- the buffer size argument to os.fdopen (default -1).
    'delete' -- whether the file is deleted on close (default True).

    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as file.name.  The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """
    if dir is None:
        dir = gettempdir()
    if 'b' in mode:
        flags = _bin_openflags
    else:
        flags = _text_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    try:
        file = _os.fdopen(fd, mode, bufsize)
        return _TemporaryFileWrapper(file, name, delete)
    except:
        # Bare except on purpose: never leak the descriptor, even on
        # exceptions that don't derive from Exception.
        _os.close(fd)
        raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
                      prefix=template, dir=None):
        """Create and return a temporary file.

        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to os.fdopen (default "w+b").
        'bufsize' -- the buffer size argument to os.fdopen (default -1).

        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()
        if 'b' in mode:
            flags = _bin_openflags
        else:
            flags = _text_openflags
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the open descriptor keeps the data alive,
            # and the file vanishes from the filesystem right away.
            _os.unlink(name)
            return _os.fdopen(fd, mode, bufsize)
        except:
            # Bare except on purpose: never leak the descriptor.
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from
    StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False

    def __init__(self, max_size=0, mode='w+b', bufsize=-1,
                 suffix="", prefix=template, dir=None):
        # Data is buffered in memory until max_size bytes is exceeded
        # (max_size=0 means "never roll over on size"); the TemporaryFile
        # arguments are remembered for the eventual rollover.
        self._file = _StringIO()
        self._max_size = max_size
        self._rolled = False
        self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir)

    def _check(self, file):
        # Roll over to a real file once the spool exceeds max_size.
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        """Move the in-memory contents into a real TemporaryFile."""
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(*self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        # Preserve the current position in the new file.
        newfile.seek(file.tell(), 0)
        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # _StringIO instance to a real file. So we list
    # all the methods directly.

    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        self._file.close()

    # file protocol
    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    def fileno(self):
        # A real OS-level descriptor requires a real file, so force rollover.
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            # StringIO has no .mode; report the mode the rolled file will use.
            return self._TemporaryFileArgs[0]

    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # No name until we have rolled over to a real file.
            return None

    def next(self):
        # BUGFIX: previously returned the bound method object
        # (self._file.next) instead of calling it, so iteration via next()
        # yielded method objects rather than lines.
        return self._file.next()

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        self._file.seek(*args)

    @property
    def softspace(self):
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self):
        # NOTE: no size argument is accepted; truncates at the current
        # position, matching the original interface.
        self._file.truncate()

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv

    def xreadlines(self, *args):
        if hasattr(self._file, 'xreadlines'):  # real file
            return iter(self._file)
        else:  # StringIO()
            return iter(self._file.readlines(*args))
| |
import datetime
import uuid
from copy import copy
from decimal import Decimal
from django.db.models.signals import post_save
from mock import patch
from django.test import SimpleTestCase, TestCase
from corehq.apps.commtrack.models import StockState, update_domain_mapping
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.indicators import LedgerBalancesIndicator
from corehq.apps.userreports.indicators.factory import IndicatorFactory
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.products.models import SQLProduct
from corehq.util.context_managers import drop_connected_signals
class SingleIndicatorTestBase(SimpleTestCase):
    """Shared helper for tests that exercise a single-column indicator."""

    def _check_result(self, indicator, document, value, context=None):
        # A single-column indicator must yield exactly one value.
        results = indicator.get_values(document, context=context)
        result, = results
        self.assertEqual(value, result.value)
class BooleanIndicatorTest(SingleIndicatorTestBase):
    """Tests for the 'boolean' indicator type: spec validation plus
    filter evaluation (1 on match, 0 otherwise)."""

    def setUp(self):
        # A minimal valid boolean indicator: 1 when doc['foo'] == 'bar'.
        self.indicator = IndicatorFactory.from_spec({
            'type': 'boolean',
            'column_id': 'col',
            'filter': {
                'type': 'property_match',
                'property_name': 'foo',
                'property_value': 'bar',
            }
        })
        self.assertEqual(1, len(self.indicator.get_columns()))
    def testNoColumnId(self):
        # column_id is required.
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'filter': {
                    'type': 'property_match',
                    'property_name': 'foo',
                    'property_value': 'bar',
                }
            })
    def testEmptyColumnId(self):
        # column_id must be non-empty.
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'column_id': '',
                'filter': {
                    'type': 'property_match',
                    'property_name': 'foo',
                    'property_value': 'bar',
                }
            })
    def testNoFilter(self):
        # filter is required.
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'column_id': 'col',
            })
    def testEmptyFilter(self):
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'column_id': 'col',
                'filter': None,
            })
    def testBadFilterType(self):
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'column_id': 'col',
                'filter': 'wrong type',
            })
    def testInvalidFilter(self):
        # filter spec missing property_name is rejected.
        with self.assertRaises(BadSpecError):
            IndicatorFactory.from_spec({
                'type': 'boolean',
                'column_id': 'col',
                'filter': {
                    'type': 'property_match',
                    'property_value': 'bar',
                }
            })
    def testIndicatorMatch(self):
        self._check_result(self.indicator, dict(foo='bar'), 1)
    def testIndicatorNoMatch(self):
        self._check_result(self.indicator, dict(foo='not bar'), 0)
    def testIndicatorMissing(self):
        self._check_result(self.indicator, dict(notfoo='bar'), 0)
    def testComplexStructure(self):
        # in slightly more compact format:
        # ((foo=bar) or (foo1=bar1 and foo2=bar2 and (foo3=bar3 or foo4=bar4)))
        indicator = IndicatorFactory.from_spec({
            "type": "boolean",
            "column_id": "col",
            "filter": {
                "type": "or",
                "filters": [
                    {
                        "type": "property_match",
                        "property_name": "foo",
                        "property_value": "bar"
                    },
                    {
                        "type": "and",
                        "filters": [
                            {
                                "type": "property_match",
                                "property_name": "foo1",
                                "property_value": "bar1"
                            },
                            {
                                "type": "property_match",
                                "property_name": "foo2",
                                "property_value": "bar2"
                            },
                            {
                                "type": "or",
                                "filters": [
                                    {
                                        "type": "property_match",
                                        "property_name": "foo3",
                                        "property_value": "bar3"
                                    },
                                    {
                                        "type": "property_match",
                                        "property_name": "foo4",
                                        "property_value": "bar4"
                                    }
                                ]
                            },
                        ]
                    },
                ]
            }
        })
        # first level or
        self._check_result(indicator, dict(foo='bar'), 1)
        # first level and with both or's
        self._check_result(indicator, dict(foo1='bar1', foo2='bar2', foo3='bar3'), 1)
        self._check_result(indicator, dict(foo1='bar1', foo2='bar2', foo4='bar4'), 1)
        # first and not right
        self._check_result(indicator, dict(foo1='not bar1', foo2='bar2', foo3='bar3'), 0)
        # second and not right
        self._check_result(indicator, dict(foo1='bar1', foo2='not bar2', foo3='bar3'), 0)
        # last and not right
        self._check_result(indicator, dict(foo1='bar1', foo2='bar2', foo3='not bar3', foo4='not bar4'), 0)
    def test_not_null_filter_root_doc(self):
        # "not (root_doc.ccs_opened_date in ['', None])": whitespace-only
        # strings count as non-null (see the ' ' case below).
        indicator = IndicatorFactory.from_spec(
            {
                "filter": {
                    "filter": {
                        "operator": "in",
                        "expression": {
                            "expression": {
                                "datatype": None,
                                "type": "property_name",
                                "property_name": "ccs_opened_date"
                            },
                            "type": "root_doc"
                        },
                        "type": "boolean_expression",
                        "property_value": [
                            "",
                            None
                        ]
                    },
                    "type": "not"
                },
                "type": "boolean",
                "display_name": None,
                "column_id": "prop1_not_null"
            }
        )
        self._check_result(indicator, {}, 1, EvaluationContext(root_doc=dict(ccs_opened_date='not_null')))
        self._check_result(indicator, {}, 1, EvaluationContext(root_doc=dict(ccs_opened_date=' ')))
        self._check_result(indicator, {}, 0, EvaluationContext(root_doc=dict(ccs_opened_date='')))
        self._check_result(indicator, {}, 0, EvaluationContext(root_doc=dict(ccs_opened_date=None)))
        self._check_result(indicator, {}, 0, EvaluationContext(root_doc=dict()))
class CountIndicatorTest(SingleIndicatorTestBase):
    """The 'count' indicator emits 1 for every document it sees."""

    def testCount(self):
        spec = {
            "type": "count",
            "column_id": "count",
            "display_name": "Count"
        }
        self._check_result(IndicatorFactory.from_spec(spec), dict(), 1)
class RawIndicatorTest(SingleIndicatorTestBase):
    """Tests for the 'raw' indicator type: column metadata defaults,
    datatype coercion, and property_path traversal."""

    def testMetadataDefaults(self):
        # By default raw columns are nullable and not primary keys.
        indicator = IndicatorFactory.from_spec({
            "type": "raw",
            "column_id": "foo",
            "datatype": "integer",
            'property_name': 'foo',
            "display_name": "raw foos",
        })
        self.assertEqual(True, indicator.column.is_nullable)
        self.assertEqual(False, indicator.column.is_primary_key)
    def testMetadataOverrides(self):
        indicator = IndicatorFactory.from_spec({
            "type": "raw",
            "column_id": "foo",
            "datatype": "integer",
            'property_name': 'foo',
            "display_name": "raw foos",
            "is_nullable": False,
            "is_primary_key": True,
        })
        self.assertEqual(False, indicator.column.is_nullable)
        self.assertEqual(True, indicator.column.is_primary_key)
    def test_raw_ints(self):
        # Non-numeric and missing values become None; floats truncate to int.
        indicator = IndicatorFactory.from_spec({
            "type": "raw",
            "column_id": "foo",
            "datatype": "integer",
            'property_name': 'foo',
            "display_name": "raw foos",
        })
        self._check_result(indicator, dict(foo="bar"), None)
        self._check_result(indicator, dict(foo=1), 1)
        self._check_result(indicator, dict(foo=1.2), 1)
        self._check_result(indicator, dict(foo=None), None)
        self._check_result(indicator, dict(nofoo='foryou'), None)
    def test_raw_strings(self):
        # Numbers are stringified; None and missing values stay None.
        indicator = IndicatorFactory.from_spec({
            "type": "raw",
            "column_id": "foo",
            "datatype": "string",
            'property_name': 'foo',
            "display_name": "raw foos",
        })
        self._check_result(indicator, dict(foo="bar"), 'bar')
        self._check_result(indicator, dict(foo=1), '1')
        self._check_result(indicator, dict(foo=1.2), '1.2')
        self._check_result(indicator, dict(foo=None), None)
        self._check_result(indicator, dict(nofoo='foryou'), None)
    def testNestedSinglePath(self):
        self._check_result(
            self._default_nested_indicator(['property']),
            {'property': 'the right value'},
            'the right value'
        )
    def testNestedDeepReference(self):
        test_doc = {
            'parent': {
                'child': {
                    'grandchild': 'the right value'
                }
            }
        }
        self._check_result(
            self._default_nested_indicator(["parent", "child", "grandchild"]),
            test_doc,
            'the right value'
        )
    def testNestedInvalidTopLevel(self):
        self._check_result(
            self._default_nested_indicator(['parent', 'child']),
            {'badparent': 'bad value'},
            None,
        )
    def testNestedInvalidMidLevel(self):
        test_doc = {
            'parent': {
                'badchild': {
                    'grandchild': 'the wrong value'
                }
            }
        }
        self._check_result(
            self._default_nested_indicator(["parent", "child", "grandchild"]),
            test_doc,
            None
        )
    def _default_nested_indicator(self, path):
        # Helper: raw string indicator that follows `path` into the document.
        return IndicatorFactory.from_spec({
            "type": "raw",
            "column_id": "foo",
            "datatype": "string",
            "property_path": path,
            "display_name": "indexed",
        })
class ExpressionIndicatorTest(SingleIndicatorTestBase):
    """Tests for the 'expression' indicator type: plain property lookup,
    conditional expressions, transforms, and literal expressions."""

    @property
    def simple_indicator(self):
        # String column that mirrors doc['foo'].
        return IndicatorFactory.from_spec({
            "type": "expression",
            "expression": {
                "type": "property_name",
                "property_name": "foo",
            },
            "column_id": "foo",
            "datatype": "string",
            "display_name": "expression foos",
        })
    @property
    def complex_indicator(self):
        # this expression is the equivalent to:
        #   doc.true_value if doc.test == 'match' else doc.false_value
        return IndicatorFactory.from_spec({
            "type": "expression",
            "expression": {
                'type': 'conditional',
                'test': {
                    'type': 'boolean_expression',
                    'expression': {
                        'type': 'property_name',
                        'property_name': 'test',
                    },
                    'operator': 'eq',
                    'property_value': 'match',
                },
                'expression_if_true': {
                    'type': 'property_name',
                    'property_name': 'true_value',
                },
                'expression_if_false': {
                    'type': 'property_name',
                    'property_name': 'false_value',
                },
            },
            "column_id": "foo",
            "datatype": "string",
            "display_name": "expression foos",
        })
    def test_expression(self):
        self._check_result(self.simple_indicator, dict(foo="bar"), "bar")
    def test_missing_value(self):
        self._check_result(self.simple_indicator, dict(notfoo="bar"), None)
    def test_complicated_expression(self):
        # largely duplicated from ConditionalExpressionTest
        indicator = self.complex_indicator
        self._check_result(indicator, {
            'test': 'match',
            'true_value': 'correct',
            'false_value': 'incorrect',
        }, 'correct')
        self._check_result(indicator, {
            'test': 'non-match',
            'true_value': 'correct',
            'false_value': 'incorrect',
        }, 'incorrect')
        self._check_result(indicator, {
            'true_value': 'correct',
            'false_value': 'incorrect',
        }, 'incorrect')
        self._check_result(indicator, {}, None)
    def test_datasource_transform(self):
        # A transform is applied after the expression: "3" -> "March".
        indicator = IndicatorFactory.from_spec({
            "type": "expression",
            "column_id": "transformed_value",
            "display_name": "transformed value",
            "expression": {
                "type": "property_name",
                "property_name": "month",
            },
            "datatype": "string",
            "transform": {
                "type": "custom",
                "custom_type": "month_display"
            },
        })
        self._check_result(indicator, {'month': "3"}, "March")
    def test_literal(self):
        # A bare value as the expression is treated as a constant.
        indicator = IndicatorFactory.from_spec({
            "type": "expression",
            "expression": 10,
            "column_id": "foo",
            "datatype": "integer"
        })
        self._check_result(indicator, {}, 10)
        self._check_result(indicator, {'foo': 'bar'}, 10)
class ChoiceListIndicatorTest(SimpleTestCase):
    """Tests for the 'choice_list' indicator type: one 0/1 column per
    choice, for both single-select and multi-select styles."""

    def setUp(self):
        self.spec = {
            "type": "choice_list",
            "column_id": "col",
            "display_name": "the category",
            "property_name": "category",
            "choices": [
                "bug",
                "feature",
                "app",
                "schedule"
            ],
            "select_style": "single",
        }
    def _check_vals(self, indicator, document, expected_values):
        # Compare the per-choice column values positionally.
        values = indicator.get_values(document)
        for i, val in enumerate(values):
            self.assertEqual(expected_values[i], val.value)
    def testConstructChoiceList(self):
        # One column per choice; ids embed both the column_id and the choice.
        indicator = IndicatorFactory.from_spec(self.spec)
        cols = indicator.get_columns()
        self.assertEqual(4, len(cols))
        for i, choice in enumerate(self.spec['choices']):
            self.assertTrue(self.spec['column_id'] in cols[i].id)
            self.assertTrue(choice in cols[i].id)
        self.assertEqual(self.spec['display_name'], indicator.display_name)
    def testChoiceListWithPath(self):
        # property_path replaces property_name for nested documents.
        spec = copy(self.spec)
        del spec['property_name']
        spec['property_path'] = ['path', 'to', 'category']
        indicator = IndicatorFactory.from_spec(spec)
        self._check_vals(indicator, {'category': 'bug'}, [0, 0, 0, 0])
        self._check_vals(indicator, {'path': {'category': 'bug'}}, [0, 0, 0, 0])
        self._check_vals(indicator, {'path': {'to': {'category': 'bug'}}}, [1, 0, 0, 0])
        self._check_vals(indicator, {'path': {'to': {'nothing': 'bug'}}}, [0, 0, 0, 0])
    def testSingleSelectIndicators(self):
        indicator = IndicatorFactory.from_spec(self.spec)
        self._check_vals(indicator, dict(category='bug'), [1, 0, 0, 0])
        self._check_vals(indicator, dict(category='feature'), [0, 1, 0, 0])
        self._check_vals(indicator, dict(category='app'), [0, 0, 1, 0])
        self._check_vals(indicator, dict(category='schedule'), [0, 0, 0, 1])
        self._check_vals(indicator, dict(category='nomatch'), [0, 0, 0, 0])
        self._check_vals(indicator, dict(category=''), [0, 0, 0, 0])
        self._check_vals(indicator, dict(nocategory='bug'), [0, 0, 0, 0])
    def testMultiSelectIndicators(self):
        # Multi-select treats the value as a space-separated list of choices.
        spec = copy(self.spec)
        spec['select_style'] = 'multiple'
        indicator = IndicatorFactory.from_spec(spec)
        self._check_vals(indicator, dict(category='bug'), [1, 0, 0, 0])
        self._check_vals(indicator, dict(category='feature'), [0, 1, 0, 0])
        self._check_vals(indicator, dict(category='app'), [0, 0, 1, 0])
        self._check_vals(indicator, dict(category='schedule'), [0, 0, 0, 1])
        self._check_vals(indicator, dict(category='nomatch'), [0, 0, 0, 0])
        self._check_vals(indicator, dict(category=''), [0, 0, 0, 0])
        self._check_vals(indicator, dict(nocategory='bug'), [0, 0, 0, 0])
        self._check_vals(indicator, dict(category='bug feature'), [1, 1, 0, 0])
        self._check_vals(indicator, dict(category='bug feature app schedule'), [1, 1, 1, 1])
        self._check_vals(indicator, dict(category='bug nomatch'), [1, 0, 0, 0])
class IndicatorDatatypeTest(SingleIndicatorTestBase):
    """Datatype coercion checks for raw indicators."""

    def testDecimal(self):
        spec = {
            'type': 'raw',
            'column_id': 'col',
            "property_name": "foo",
            "datatype": "decimal",
        }
        decimal_indicator = IndicatorFactory.from_spec(spec)
        # Floats are coerced to Decimal; None and unparseable strings map to None.
        self._check_result(decimal_indicator, dict(foo=5.5), Decimal(5.5))
        self._check_result(decimal_indicator, dict(foo=None), None)
        self._check_result(decimal_indicator, dict(foo="banana"), None)
class LedgerBalancesIndicatorTest(SimpleTestCase):
    """Tests the 'ledger_balances' indicator with the stock lookup mocked
    out, so no database access is needed."""

    def setUp(self):
        self.spec = {
            "type": "ledger_balances",
            "column_id": "soh",
            "display_name": "Stock On Hand",
            "ledger_section": "soh",
            "product_codes": ["abc", "def", "ghi"],
            "case_id_expression": {
                "type": "property_name",
                "property_name": "_id"
            }
        }
        # Canned return value for the patched _get_values_by_product.
        self.stock_states = {'abc': 32, 'def': 85, 'ghi': 11}
    @patch.object(LedgerBalancesIndicator, '_get_values_by_product')
    def test_ledger_balances_indicator(self, get_values_by_product):
        get_values_by_product.return_value = self.stock_states
        indicator = IndicatorFactory.from_spec(self.spec)
        doc = {'_id': 'case1'}
        values = indicator.get_values(doc, EvaluationContext(doc, 0))
        # Column ids are "<column_id>_<product_code>".
        self.assertEqual(
            [(val.column.id, val.value) for val in values],
            [('soh_abc', 32), ('soh_def', 85), ('soh_ghi', 11)]
        )
class TestGetValuesByProduct(TestCase):
    """Database-backed tests for LedgerBalancesIndicator._get_values_by_product,
    using real SQLProduct and StockState rows."""

    @classmethod
    def setUpClass(cls):
        # Disconnect the domain-mapping signal so creating StockStates here
        # doesn't trigger unrelated side effects; reconnected in tearDownClass.
        post_save.disconnect(update_domain_mapping, StockState)
        cls.domain_obj = create_domain('test-domain')
        for product_code, section, value in [
            ('coke', 'soh', 32),
            ('coke', 'consumption', 63),
            ('surge', 'soh', 85),
            ('fanta', 'soh', 11),
        ]:
            product = cls._make_product(product_code)
            cls._make_stock_state(product, section, value)
    @classmethod
    def tearDownClass(cls):
        post_save.connect(update_domain_mapping, StockState)
        cls.domain_obj.delete()
    @staticmethod
    def _make_product(code):
        # Minimal product fixture in the test domain.
        return SQLProduct.objects.create(
            domain='test-domain',
            product_id=uuid.uuid4().hex,
            code=code,
        )
    @staticmethod
    def _make_stock_state(product, section_id, value):
        # Suppress post_save handlers while creating the fixture row.
        with drop_connected_signals(post_save):
            return StockState.objects.create(
                stock_on_hand=value,
                case_id='case1',
                product_id=product.product_id,
                sql_product=product,
                section_id=section_id,
                last_modified_date=datetime.datetime.now(),
            )
    def test_get_soh_values_by_product(self):
        # Unknown product codes default to 0.
        values = LedgerBalancesIndicator._get_values_by_product(
            'soh', 'case1', ['coke', 'surge', 'new_coke']
        )
        self.assertEqual(values['coke'], 32)
        self.assertEqual(values['surge'], 85)
        self.assertEqual(values['new_coke'], 0)
    def test_get_consumption_by_product(self):
        # 'surge' has no consumption row, so it also defaults to 0.
        values = LedgerBalancesIndicator._get_values_by_product(
            'consumption', 'case1', ['coke', 'surge', 'new_coke']
        )
        self.assertEqual(values['coke'], 63)
        self.assertEqual(values['surge'], 0)
        self.assertEqual(values['new_coke'], 0)
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are replaced by git during 'git archive'.
    # setup.py/versioneer.py greps for the variable names, so each one must
    # stay defined on a line of its own; _version.py just calls
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Plain attribute bag: VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source and verbose are assigned by get_config() below.
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py for the project.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = "v"
    config.parentdir_prefix = "bottleneck-"
    config.versionfile_source = "bottleneck/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Raised by the version-discovery helpers below (e.g.
    # versions_from_parentdir, git_pieces_from_vcs) to mean
    # "this strategy doesn't apply; try the next one".
# Registry of long _version.py templates keyed by VCS; not populated in
# this generated module (kept for versioneer API shape — TODO confirm).
LONG_VERSION_PY = {}
# HANDLERS[vcs][method] -> handler function, filled in by the
# @register_vcs_handler decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault lazily creates the per-VCS sub-dict on first use.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Run the first of `commands` that exists, passing it `args`.

    Returns (decoded stdout, returncode) on success, (None, returncode)
    when the command ran but failed, and (None, None) when no command
    could be started.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except OSError as e:
            # Missing executable: fall through to the next candidate.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # Loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    # Check `root` itself plus up to two ancestors.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans `versionfile_abs` for the git_refnames/git_full/git_date
    assignments and returns whatever subset was found as a dict with keys
    "refnames", "full" and "date".
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # BUGFIX: use a context manager so the file handle is closed even if
        # an exception fires mid-read; the original f = open(...)/f.close()
        # pair leaked the handle on that path (the broad except swallowed it).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: return whatever was collected (possibly {}).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Returns a pieces-style dict (version, full-revisionid, dirty, error,
    date) built from expanded git-archive keywords, or raises NotThisMethod
    when the keywords are absent or unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line.  Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp.  However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Keywords were never substituted: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print("picking %s" % r)
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date.  Raises NotThisMethod when git is unavailable or
    `root` is not a git checkout.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7] # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out) # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # ROBUSTNESS: "closest-tag" may be present with the value None (set by
    # git_pieces_from_vcs when no tag is found), in which case dict.get's
    # default is NOT used and `"+" in None` would raise TypeError.  Coerce
    # falsy values to "" before the membership test.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        # Append the local version identifier: +DISTANCE.gHEX[.dirty]
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post0.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % distance
    # a clean tagged build renders as the bare tag
    return tag + (".post0.dev%d" % distance if distance else "")
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag found anywhere in history
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        # any distance or dirt forces a .post segment
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    # a dirty tree always carries the .dev0 marker
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # only mention distance/hash when we have moved past the tag
        suffix = ("-%d-g%s" % (pieces["distance"], pieces["short"])
                  if pieces["distance"] else "")
        rendered = tag + suffix
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # distance and hash are always appended, even at distance 0
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style.

    Returns a dict with keys: version, full-revisionid, dirty, error, date.
    Raises ValueError for an unrecognized style name.
    """
    if pieces["error"]:
        # Nothing sensible can be rendered from broken pieces.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table from style name to its renderer.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)

    return {
        "version": renderer(pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    def _unknown(reason):
        # Uniform "could not determine a version" result.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": reason,
            "date": None,
        }

    # Strategy 1: expanded VCS keywords (works even without a .git dir).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # Locate the source-tree root by walking up from this file.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return _unknown("unable to find root of source tree")

    # Strategy 2: interrogate git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return _unknown("unable to compute version")
| |
#!/usr/bin/env python2
##############################################################################
# Copyright (c) 2012, GeoData Institute (www.geodata.soton.ac.uk)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##############################################################################
"""
Output mapcache configuration information to `node-gyp`
Configuration options are retrieved from environment variables set using `npm
config set`. This allows for a simple `npm install mapcache` to work.
"""
from optparse import OptionParser
import os
import re
import sys
def warn(msg):
    # Diagnostics go to stderr: stdout is consumed by node-gyp, so it must
    # carry only the configuration values this script prints.
    sys.stderr.write('%s\n' % msg)
def die(msg):
    # Report the fatal configuration problem on stderr and abort with a
    # non-zero exit status so npm/node-gyp notices the failure.
    warn('Configuration failed: {0}'.format(msg))
    sys.exit(1)
class ConfigError(Exception):
    """Raised when the mapcache build configuration cannot be read."""
    pass
class Config(object):
    """Base class for obtaining mapcache configuration information.

    Subclasses override the accessors to scrape build-system specific
    metadata; this base implementation falls back to `npm config`
    environment variables and conventional paths.
    """

    def __init__(self, build_dir):
        # Directory containing the mapcache build artefacts.
        self.build_dir = build_dir

    def getLibDir(self):
        # Library directory from `npm config set mapcache:lib_dir`,
        # or an empty string when unset.
        return os.environ.get('npm_config_mapcache_lib_dir', '')

    def getIncludeDir(self):
        # Headers conventionally live under `<build_dir>/include`.
        return os.path.join(self.build_dir, 'include')

    def getCflags(self):
        # Add debugging flags and defines when requested via npm config.
        debug = 'npm_config_mapcache_debug' in os.environ
        return '-DDEBUG -g -ggdb' if debug else ''
class AutoconfConfig(Config):
    """Configuration reader for mapcache < 1.0 (autotools builds).

    Values are scraped from the generated `Makefile.inc`.
    """

    def __init__(self, *args, **kwargs):
        super(AutoconfConfig, self).__init__(*args, **kwargs)
        makefile_inc = os.path.join(self.build_dir, 'Makefile.inc')
        if not os.path.exists(makefile_inc):
            raise ConfigError('Expected `Makefile.inc` in %s' % self.build_dir)
        self.makefile_inc = makefile_inc

    def getLibDir(self):
        # The install prefix is declared as `prefix = <path>`; libraries
        # live under `<prefix>/lib`.
        prefix_re = re.compile(r'^prefix *= *(.+)$')
        with open(self.makefile_inc, 'r') as handle:
            for line in handle:
                found = prefix_re.match(line)
                if not found:
                    continue
                prefix = found.groups()[0].strip()
                if prefix:
                    return os.path.join(prefix, 'lib')
        return ''

    def getCflags(self):
        # Collect every `*_INC = <flags>` assignment from the Makefile and
        # append the base class debugging flags (if any).
        include_re = re.compile(r'^[A-Z]+_INC *= *(.+)$')
        flags = []
        with open(self.makefile_inc, 'r') as handle:
            for line in handle:
                found = include_re.match(line)
                if not found:
                    continue
                value = found.groups()[0].strip()
                if value:
                    flags.append(value)
        debug_flags = super(AutoconfConfig, self).getCflags()
        if debug_flags:
            flags.append(debug_flags)
        return ' '.join(flags)
class CmakeConfig(Config):
    """Configuration reader for mapcache >= 1.0 (CMake builds).

    Values are scraped from `CMakeCache.txt`.
    """

    def __init__(self, *args, **kwargs):
        super(CmakeConfig, self).__init__(*args, **kwargs)
        cmake_cache = os.path.join(self.build_dir, 'CMakeCache.txt')
        if not os.path.exists(cmake_cache):
            raise ConfigError('Expected `CMakeCache.txt` in %s' % self.build_dir)
        self.cmake_cache = cmake_cache

    def getLibDir(self):
        # Libraries live under `<CMAKE_INSTALL_PREFIX>/lib`.
        prefix_re = re.compile(r'^CMAKE_INSTALL_PREFIX:PATH *= *(.+)$')
        with open(self.cmake_cache, 'r') as handle:
            for line in handle:
                found = prefix_re.match(line)
                if not found:
                    continue
                prefix = found.groups()[0].strip()
                if prefix:
                    return os.path.join(prefix, 'lib')
        return ''

    def getIncludeDir(self):
        # Each entry pairs a cache-line pattern with extra path components
        # appended to the matched directory.
        patterns = [
            (re.compile(r'^\w+_INCLUDE_DIR:PATH *= *(.+)$'),),  # library include dir
            (re.compile(r'^MapCache_SOURCE_DIR:STATIC *= *(.+)$'), 'include'),  # source tree
        ]
        dirs = [os.path.join(self.build_dir, 'include')]
        with open(self.cmake_cache, 'r') as handle:
            for line in handle:
                for entry in patterns:
                    found = entry[0].match(line)
                    if not found:
                        continue
                    value = found.groups()[0].strip()
                    if value:
                        dirs.append(os.path.join(value, *entry[1:]))
        return ' '.join(dirs)
# Command-line interface: node-gyp invokes this script once per option and
# captures stdout, so each flag prints exactly one configuration value.
parser = OptionParser()
parser.add_option("--include",
                  action="store_true", default=False,
                  help="output the mapcache include path")
parser.add_option("--libraries",
                  action="store_true", default=False,
                  help="output the mapcache library link option")
parser.add_option("--ldflags",
                  action="store_true", default=False,
                  help="output the mapcache library rpath option")
parser.add_option("--cflags",
                  action="store_true", default=False,
                  help="output the mapcache cflag options")
(options, args) = parser.parse_args()
# The build directory must have been provided via `npm config set`.
try:
    build_dir = os.environ['npm_config_mapcache_build_dir']
except KeyError:
    die('`npm config set mapcache:build_dir` has not been called')
# get the config object, trying the new cmake system first and falling back to
# the legacy autoconf build system
try:
    try:
        config = CmakeConfig(build_dir)
    except ConfigError, e:
        try:
            config = AutoconfConfig(build_dir)
        except ConfigError, e2:
            # neither build system was detected; report both failures
            warn("Failed to configure using Cmake: %s" % e)
            warn("Attempting configuration using autotools...")
            die(e2)
    # output the requested options
    if options.include:
        print config.getIncludeDir()
    if options.libraries:
        lib_dir = config.getLibDir()
        if lib_dir:
            print "-L%s" % lib_dir
    if options.ldflags:
        # write the library path into the resulting binary
        lib_dir = config.getLibDir()
        if lib_dir:
            print "-Wl,-rpath=%s" % lib_dir
    if options.cflags:
        print config.getCflags()
except ConfigError, e:
    die(e)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import multiprocessing
import os
import platform
import uuid
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
# Module-level logger for the Hyper-V vmops driver.
LOG = logging.getLogger(__name__)

# Configuration options specific to the Hyper-V driver.
hyperv_opts = [
    cfg.StrOpt('vswitch_name',
               default=None,
               help='Default vSwitch Name, '
                    'if none provided first external is used'),
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help='required for live migration among '
                     'hosts with different CPU features')
    ]

FLAGS = flags.FLAGS
# Register the driver options on the global flags object.
FLAGS.register_opts(hyperv_opts)
class VMOps(baseops.BaseOps):
    """Management class for basic VM operations on Hyper-V.

    All hypervisor interaction goes through the WMI virtualization
    namespace (``self._conn``, provided by the baseops base class) and the
    helpers in ``vmutils``; volume-related work is delegated to the
    injected ``volumeops`` instance.
    """

    def __init__(self, volumeops):
        super(VMOps, self).__init__()
        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def list_instances(self):
        """Return the names of all the instances known to Hyper-V."""
        vms = [v.ElementName
               for v in self._conn.Msvm_ComputerSystem(['ElementName'],
                                                       Caption="Virtual Machine")]
        return vms

    def get_info(self, instance):
        """Get information about the VM."""
        LOG.debug(_("get_info called for instance"), instance=instance)
        instance_name = instance["name"]
        return self._get_info(instance_name)

    def _get_info(self, instance_name):
        """Collect state/memory/cpu/uptime details for a single VM.

        :raises: exception.InstanceNotFound if the VM does not exist.
        """
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance=instance_name)
        vm = self._conn.Msvm_ComputerSystem(
            ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
        summary_info = vs_man_svc.GetSummaryInformation(
            [constants.VM_SUMMARY_NUM_PROCS,
             constants.VM_SUMMARY_ENABLED_STATE,
             constants.VM_SUMMARY_MEMORY_USAGE,
             constants.VM_SUMMARY_UPTIME],
            settings_paths)[1]
        info = summary_info[0]
        LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
        state = str(constants.HYPERV_POWER_STATE[info.EnabledState])
        # stringified copies are only for the debug log below
        memusage = str(info.MemoryUsage)
        numprocs = str(info.NumberOfProcessors)
        uptime = str(info.UpTime)
        LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)s,"
                    " mem=%(memusage)s, num_cpu=%(numprocs)s,"
                    " uptime=%(uptime)s"), locals())
        return {'state': state,
                'max_mem': info.MemoryUsage,
                'mem': info.MemoryUsage,
                'num_cpu': info.NumberOfProcessors,
                'cpu_time': info.UpTime}

    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is not None:
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        # If this is not a boot-from-volume spawn, fetch the image (assumed
        # to be a VHD file) before building the VM.
        if not ebs_root:
            vhdfile = self._vmutils.get_vhd_path(instance_name)
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                                  context=context,
                                  target=vhdfile,
                                  fname=instance['image_ref'],
                                  image_id=instance['image_ref'],
                                  user=instance['user_id'],
                                  project=instance['project_id'],
                                  cow=FLAGS.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                # BUGFIX: the original fell through and tried to build the
                # VM against a VHD that was never fetched; propagate the
                # failure after cleanup instead.
                raise

        try:
            self._create_vm(instance)
            if not ebs_root:
                self._create_disk(instance['name'], vhdfile)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                                   instance_name)
            # A SCSI controller for volumes connection is created
            self._create_scsi_controller(instance['name'])
            for vif in network_info:
                mac_address = vif['address'].replace(':', '')
                self._create_nic(instance['name'], mac_address)

            LOG.debug(_('Starting VM %s '), instance_name)
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s '), instance_name)
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise

    def _create_vm(self, instance):
        """Create a VM but don't start it."""
        instance_name = instance["name"]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]

        vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
        vs_gs_data.ElementName = instance_name
        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
            [], None, vs_gs_data.GetText_(1))[1:]
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            success = (ret_val == 0)

        if not success:
            raise vmutils.HyperVException(_('Failed to create VM %s') %
                                          instance_name)
        LOG.debug(_('Created VM %s...'), instance_name)
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]

        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        vmsetting = [s for s in vmsettings
                     if s.SettingType == 3][0]  # avoid snapshots
        memsetting = vmsetting.associators(
            wmi_result_class='Msvm_MemorySettingData')[0]
        # No Dynamic Memory, so reservation, limit and quantity are identical.
        mem = long(str(instance['memory_mb']))
        memsetting.VirtualQuantity = mem
        memsetting.Reservation = mem
        memsetting.Limit = mem

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [memsetting.GetText_(1)])
        LOG.debug(_('Set memory for vm %s...'), instance_name)

        procsetting = vmsetting.associators(
            wmi_result_class='Msvm_ProcessorSettingData')[0]
        vcpus = long(instance['vcpus'])
        procsetting.VirtualQuantity = vcpus
        procsetting.Reservation = vcpus
        procsetting.Limit = 100000  # static assignment to 100%
        if FLAGS.limit_cpu_features:
            procsetting.LimitProcessorFeatures = True

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [procsetting.GetText_(1)])
        LOG.debug(_('Set vcpus for vm %s...'), instance_name)

    def _create_scsi_controller(self, vm_name):
        """Create an iscsi controller ready to mount volumes."""
        LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
                    'attaching') % locals())
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        scsicontrldefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
            AND InstanceID LIKE '%Default%'")[0]
        if scsicontrldefault is None:
            raise vmutils.HyperVException(_('Controller not found'))
        scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', scsicontrldefault)
        scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        scsiresource = self._vmutils.add_virt_resource(self._conn,
            scsicontrl, vm)
        if scsiresource is None:
            raise vmutils.HyperVException(
                _('Failed to add scsi controller to VM %s') %
                vm_name)

    def _create_disk(self, vm_name, vhdfile):
        """Create a disk and attach it to the vm."""
        LOG.debug(_('Creating disk for %(vm_name)s by attaching'
                    ' disk file %(vhdfile)s') % locals())
        # Find the IDE controller for the vm.
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        ctrller = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                   and r.Address == "0"]
        # Find the default disk drive object for the vm and clone it.
        diskdflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
            AND InstanceID LIKE '%Default%'")[0]
        diskdrive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', diskdflt)
        # Set the IDE ctrller as parent.
        diskdrive.Parent = ctrller[0].path_()
        diskdrive.Address = 0
        # Add the cloned disk drive object to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            diskdrive, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add diskdrive to VM %s') %
                vm_name)
        diskdrive_path = new_resources[0]
        LOG.debug(_('New disk drive path is %s'), diskdrive_path)
        # Find the default VHD disk object.
        vhddefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
            InstanceID LIKE '%Default%' ")[0]
        # Clone the default and point it to the image file.
        vhddisk = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', vhddefault)
        # Set the new drive as the parent.
        vhddisk.Parent = diskdrive_path
        vhddisk.Connection = [vhdfile]
        # Add the new vhd object as a virtual hard disk to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            vhddisk, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add vhd file to VM %s') %
                vm_name)
        LOG.info(_('Created disk for %s'), vm_name)

    def _create_nic(self, vm_name, mac):
        """Create a (synthetic) nic and attach it to the vm."""
        LOG.debug(_('Creating nic for %s '), vm_name)
        # Find the vswitch that is connected to the physical nic.
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        extswitch = self._find_external_network()
        if extswitch is None:
            raise vmutils.HyperVException(_('Cannot find vSwitch'))
        vm = vms[0]
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        # Find the default nic and clone it to create a new nic for the vm.
        # Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
        # Linux Integration Components installed.
        syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
        default_nic_data = [n for n in syntheticnics_data
                            if n.InstanceID.rfind('Default') > 0]
        new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_SyntheticEthernetPortSettingData',
            default_nic_data[0])
        # Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=vm_name,
            ScopeOfResidence="",
            VirtualSwitch=extswitch.path_())
        if ret_val != 0:
            LOG.error(_('Failed creating a port on the external vswitch'))
            raise vmutils.HyperVException(_('Failed creating port for %s') %
                                          vm_name)
        ext_path = extswitch.path_()
        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
                  % locals())
        # Connect the new nic to the new port.
        new_nic_data.Connection = [new_port]
        new_nic_data.ElementName = vm_name + ' nic'
        new_nic_data.Address = mac
        new_nic_data.StaticMacAddress = 'True'
        new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        # Add the new nic to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            new_nic_data, vm)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
                                          vm_name)
        LOG.info(_("Created nic for %s "), vm_name)

    def _find_external_network(self):
        """Find the vswitch that is connected to the physical nic.

        Assumes only one physical nic on the host
        """
        # If there are no physical nics connected to networks, return.
        if FLAGS.vswitch_name:
            LOG.debug(_("Attempting to bind NIC to %s ")
                      % FLAGS.vswitch_name)
            bound = self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)
        else:
            LOG.debug(_("No vSwitch specified, attaching to default"))
            # BUGFIX: the query result was previously discarded, leaving
            # `bound` undefined (NameError) on this code path.
            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
        if len(bound) == 0:
            return None
        if FLAGS.vswitch_name:
            return self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
        else:
            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]

    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance."""
        # BUGFIX: the docstring previously followed the first statement, so
        # it was an inert string expression rather than the method docstring.
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance_id=instance["id"])
        self._set_vm_state(instance_name, 'Reboot')

    def destroy(self, instance, network_info=None, cleanup=True):
        """Destroy the VM. Also destroy the associated VHD disk files."""
        instance_name = instance["name"]
        LOG.debug(_("Got request to destroy vm %s"), instance_name)
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            return
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        # Stop the VM first.
        self._set_vm_state(instance_name, 'Disabled')
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volumes_drives_list = []
        # Collect the volumes information before destroying the VM.
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            # Appending the Msvm_Disk path
            volumes_drives_list.append(drive_path)
        # Collect disk file information before destroying the VM.
        for disk in disks:
            disk_files.extend([c for c in disk.Connection])
        # Nuke the VM. Does not destroy disks.
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        # BUGFIX: initialize success so an unexpected return code raises a
        # HyperVException instead of an UnboundLocalError.
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        if not success:
            raise vmutils.HyperVException(_('Failed to destroy vm %s') %
                                          instance_name)
        # Disconnect volumes
        for volume_drive in volumes_drives_list:
            self._volumeops.disconnect_volume(volume_drive)
        # Delete associated vhd disk files.
        for disk in disk_files:
            vhdfile = self._conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
                      % locals())
            vhdfile.Delete()

    def pause(self, instance):
        """Pause VM instance."""
        LOG.debug(_("Pause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Paused')

    def unpause(self, instance):
        """Unpause paused VM instance."""
        LOG.debug(_("Unpause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def suspend(self, instance):
        """Suspend the specified instance."""
        # BUGFIX: removed a leftover `print instance` debug statement.
        LOG.debug(_("Suspend instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Suspended')

    def resume(self, instance):
        """Resume the suspended VM instance."""
        LOG.debug(_("Resume instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def power_off(self, instance):
        """Power off the specified instance."""
        LOG.debug(_("Power off instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Disabled')

    def power_on(self, instance):
        """Power on the specified instance."""
        LOG.debug(_("Power on instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def _set_vm_state(self, vm_name, req_state):
        """Set the desired state of the VM."""
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        if len(vms) == 0:
            return False
        (job, ret_val) = vms[0].RequestStateChange(
            constants.REQ_POWER_STATE[req_state])
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            # Invalid state for current operation. Typically means it is
            # already in the state requested
            success = True
        if success:
            LOG.info(_("Successfully changed vm state of %(vm_name)s"
                       " to %(req_state)s") % locals())
        else:
            msg = _("Failed to change vm state of %(vm_name)s"
                    " to %(req_state)s") % locals()
            LOG.error(msg)
            raise vmutils.HyperVException(msg)

    def _get_vcpu_total(self):
        """Get vcpu number of physical computer.

        :returns: the number of cpu core.
        """
        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    def _get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.

        :returns: the total amount of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT TotalVisibleMemorySize FROM win32_operatingsystem")[0]\
            .TotalVisibleMemorySize
        total_mb = long(total_kb) / 1024
        return total_mb

    def _get_local_gb_total(self):
        """Get the total hdd size(GB) of physical computer.

        :returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        # TODO(jordanrinke): This binds to C only right now,
        # need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT Size FROM win32_logicaldisk WHERE DriveType=3")[0].Size
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_vcpu_used(self):
        """Get vcpu usage number of physical computer.

        :returns: The total number of vcpu that currently used.
        """
        # TODO(jordanrinke) figure out a way to count assigned VCPUs
        total_vcpu = 0
        return total_vcpu

    def _get_memory_mb_used(self):
        """Get the free memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT FreePhysicalMemory FROM win32_operatingsystem")[0]\
            .FreePhysicalMemory
        total_mb = long(total_kb) / 1024
        return total_mb

    def _get_local_gb_used(self):
        """Get the free hdd size(GB) of physical computer.

        :returns:
            The total usage of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        # TODO(jordanrinke): This binds to C only right now,
        # need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT FreeSpace FROM win32_logicaldisk WHERE DriveType=3")[0]\
            .FreeSpace
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)
        """
        version = self._conn_cimv2.Win32_OperatingSystem()[0]\
            .Version.replace('.', '')
        LOG.info(_('Windows version: %s ') % version)
        return version

    def get_available_resource(self):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources
        """
        LOG.info(_('get_available_resource called'))
        # TODO(alexpilotti) implemented cpu_info
        dic = {'vcpus': self._get_vcpu_total(),
               'memory_mb': self._get_memory_mb_total(),
               'local_gb': self._get_local_gb_total(),
               'vcpus_used': self._get_vcpu_used(),
               'memory_mb_used': self._get_memory_mb_used(),
               'local_gb_used': self._get_local_gb_used(),
               'hypervisor_type': "hyperv",
               'hypervisor_version': self._get_hypervisor_version(),
               'hypervisor_hostname': platform.node(),
               'cpu_info': 'unknown'}
        return dic

    def _cache_image(self, fn, target, fname, cow=False, Size=None,
                     *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image. The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        @utils.synchronized(fname)
        def call_if_not_exists(path, fn, *args, **kwargs):
            # Serialized per-fname so concurrent spawns do not race on the
            # same base image.
            if not os.path.exists(path):
                fn(target=path, *args, **kwargs)

        if not os.path.exists(target):
            LOG.debug(_("use_cow_image:%s"), cow)
            if cow:
                base = self._vmutils.get_base_vhd_path(fname)
                call_if_not_exists(base, fn, *args, **kwargs)

                image_service = self._conn.query(
                    "Select * from Msvm_ImageManagementService")[0]
                (job, ret_val) = \
                    image_service.CreateDifferencingVirtualHardDisk(
                        Path=target, ParentPath=base)
                LOG.debug(
                    "Creating difference disk: JobID=%s, Source=%s, Target=%s",
                    job, base, target)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job)
                else:
                    success = (ret_val == 0)
                if not success:
                    raise vmutils.HyperVException(
                        _('Failed to create Difference Disk from '
                          '%(base)s to %(target)s') % locals())
            else:
                call_if_not_exists(target, fn, *args, **kwargs)
| |
from __future__ import division, print_function
import imp
import os
import sys
import pickle
import copy
import warnings
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from numpy._build_utils.apple_accelerate import (uses_accelerate_framework,
get_sgemv_fix)
from setup_common import *
# Experimental: compile numpy.core from multiple files instead of the
# single-file amalgamation (opt out by setting the env var to "0").
ENABLE_SEPARATE_COMPILATION = os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0"
# Relaxed strides checking: (mostly) ignore `strides[dim]` when
# `shape[dim] == 1` while computing array flags (opt out with "0").
NPY_RELAXED_STRIDES_CHECKING = os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
# config.h generation inside an Extension class, and as such sharing
# configuration informations between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
    """Run each expensive configuration check at most once.

    The first call stores a pickled snapshot of the result; subsequent
    calls return an independent deep copy unpickled from that snapshot,
    so callers may freely mutate what they get back.
    """

    def __init__(self):
        # Pickled results, or None while the check has not yet run.
        self._check_types = None
        self._check_ieee_macros = None
        self._check_complex = None

    def check_types(self, *a, **kw):
        cached = self._check_types
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        result = check_types(*a, **kw)
        self._check_types = pickle.dumps(result)
        return result

    def check_ieee_macros(self, *a, **kw):
        cached = self._check_ieee_macros
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        result = check_ieee_macros(*a, **kw)
        self._check_ieee_macros = pickle.dumps(result)
        return result

    def check_complex(self, *a, **kw):
        cached = self._check_complex
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        result = check_complex(*a, **kw)
        self._check_complex = pickle.dumps(result)
        return result
# Module-level default. NOTE(review): generate_config_h() below re-derives
# this from Py_UNICODE_WIDE but only binds a *local* of the same name (no
# ``global`` statement), so this module-level value is never updated.
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
    """Return the directory containing the libpython* libraries."""
    if sys.platform == 'win32':
        return os.path.join(sys.prefix, "libs")
    return get_config_var('LIBDIR')
def is_npy_no_signal():
    """Whether the NPY_NO_SIGNAL symbol must go into the configuration
    header (only the win32 platform needs it)."""
    return sys.platform == 'win32'
def is_npy_no_smp():
    """Whether the NPY_NO_SMP symbol must be defined in the public header.

    Controlled solely by the NPY_NOSMP environment variable.  A fancier
    check (e.g. enabling threads only with multiple CPUs) would be
    possible, but threaded code is useful even on a single CPU since
    long-running computations then do not block everything else.
    """
    return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
    """Append the Windows-specific preprocessor defines to *deflist*."""
    from numpy.distutils.misc_util import get_build_architecture
    arch = get_build_architecture()
    print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
          (arch, os.name, sys.platform))
    # Distutils hack on AMD64 on windows
    if arch == 'AMD64':
        deflist.append('DISTUTILS_USE_SDK')
    # On win32, force long double format string to be 'g', not
    # 'Lg', since the MS runtime does not support long double whose
    # size is > sizeof(double)
    if arch in ('Intel', 'AMD64'):
        deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
    """Probe availability of C math functions, headers and intrinsics.

    Appends (define_name, 1) tuples to *moredefs* in place for everything
    found, linking test programs against *mathlibs*.  Also mutates the
    OPTIONAL_STDFUNCS list (star-imported from setup_common).  Raises
    SystemError when a MANDATORY_FUNCS symbol is missing.
    """
    # Single-function check: compile+link with declaration and call.
    def check_func(func_name):
        return config.check_func(func_name, libraries=mathlibs,
                                 decl=True, call=True)

    # Check a whole list in one compile/link test (cheaper than per-func);
    # on success records a define for every function in the list.
    def check_funcs_once(funcs_name):
        decl = dict([(f, True) for f in funcs_name])
        st = config.check_funcs_once(funcs_name, libraries=mathlibs,
                                     decl=decl, call=decl)
        if st:
            moredefs.extend([(fname2def(f), 1) for f in funcs_name])
        return st

    def check_funcs(funcs_name):
        # Use check_funcs_once first, and if it does not work, test func per
        # func. Return success only if all the functions are available
        # (note: the fallback path always returns 0, even when every
        # per-function check succeeded; callers below ignore the result).
        if not check_funcs_once(funcs_name):
            # Global check failed, check func per func
            for f in funcs_name:
                if check_func(f):
                    moredefs.append((fname2def(f), 1))
            return 0
        else:
            return 1

    #use_msvc = config.check_decl("_MSC_VER")
    if not check_funcs_once(MANDATORY_FUNCS):
        raise SystemError("One of the required function to build numpy is not"
                          " available (the list is %s)." % str(MANDATORY_FUNCS))

    # Standard functions which may not be available and for which we have a
    # replacement implementation. Note that some of these are C99 functions.

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    for f in OPTIONAL_STDFUNCS_MAYBE:
        if config.check_decl(fname2def(f),
                             headers=["Python.h", "math.h"]):
            OPTIONAL_STDFUNCS.remove(f)

    check_funcs(OPTIONAL_STDFUNCS)

    # Optional headers: an empty-function check with headers=[h] succeeds
    # iff the header compiles; '.' -> '_' makes a valid macro name.
    for h in OPTIONAL_HEADERS:
        if config.check_func("", decl=False, call=False, headers=[h]):
            moredefs.append((fname2def(h).replace(".", "_"), 1))

    # Optional compiler intrinsics: tuples are (func, call_args) or
    # (func, call_args, header).
    for tup in OPTIONAL_INTRINSICS:
        headers = None
        if len(tup) == 2:
            f, args = tup
        else:
            f, args, headers = tup[0], tup[1], [tup[2]]
        if config.check_func(f, decl=False, call=True, call_args=args,
                             headers=headers):
            moredefs.append((fname2def(f), 1))

    for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
        if config.check_gcc_function_attribute(dec, fn):
            moredefs.append((fname2def(fn), 1))

    for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
        if config.check_gcc_variable_attribute(fn):
            m = fn.replace("(", "_").replace(")", "_")
            moredefs.append((fname2def(m), 1))

    # C99 functions: float and long double versions
    check_funcs(C99_FUNCS_SINGLE)
    check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
    """Check C99 complex support: header, types and math functions.

    Returns (priv, pub): lists of (define, value) tuples destined for
    config.h and _numpyconfig.h respectively.  Returns early (empty
    lists) on Interix, where complex support is known broken.
    """
    priv = []
    pub = []

    try:
        if os.uname()[0] == "Interix":
            warnings.warn("Disabling broken complex support. See #1365")
            return priv, pub
    except:
        # os.uname not available on all platforms. blanket except ugly but safe
        pass

    # Check for complex support
    st = config.check_header('complex.h')
    if st:
        priv.append(('HAVE_COMPLEX_H', 1))
        pub.append(('NPY_USE_C99_COMPLEX', 1))

        for t in C99_COMPLEX_TYPES:
            st = config.check_type(t, headers=["complex.h"])
            if st:
                pub.append(('NPY_HAVE_%s' % type2def(t), 1))

        # Check every C99 complex function for one precision suffix
        # ('' double, 'f' float, 'l' long double): try the whole list in
        # a single compile first, falling back to per-function checks.
        def check_prec(prec):
            flist = [f + prec for f in C99_COMPLEX_FUNCS]
            decl = dict([(f, True) for f in flist])
            if not config.check_funcs_once(flist, call=decl, decl=decl,
                                           libraries=mathlibs):
                for f in flist:
                    if config.check_func(f, call=True, decl=True,
                                         libraries=mathlibs):
                        priv.append((fname2def(f), 1))
            else:
                priv.extend([(fname2def(f), 1) for f in flist])

        check_prec('')
        check_prec('f')
        check_prec('l')

    return priv, pub
def check_ieee_macros(config):
    """Check how the IEEE macros isnan/isinf/signbit/isfinite are provided.

    Returns (priv, pub).  Note: unlike most checks in this file, both
    lists contain bare define *names* (strings), not (name, value)
    tuples -- the header generators write them as valueless #defines.
    """
    priv = []
    pub = []

    macros = []

    def _add_decl(f):
        priv.append(fname2def("decl_%s" % f))
        pub.append('NPY_%s' % fname2def("decl_%s" % f))

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    _macros = ["isnan", "isinf", "signbit", "isfinite"]
    for f in _macros:
        py_symbol = fname2def("decl_%s" % f)
        already_declared = config.check_decl(py_symbol,
                                             headers=["Python.h", "math.h"])
        if already_declared:
            # Python's own config already declares the symbol; only export
            # the NPY_ alias when the macro actually evaluates to true.
            if config.check_macro_true(py_symbol,
                                       headers=["Python.h", "math.h"]):
                pub.append('NPY_%s' % fname2def("decl_%s" % f))
        else:
            macros.append(f)
    # Normally, isnan and isinf are macro (C99), but some platforms only have
    # func, or both func and macro version. Check for macro only, and define
    # replacement ones if not found.
    # Note: including Python.h is necessary because it modifies some math.h
    # definitions
    for f in macros:
        st = config.check_decl(f, headers=["Python.h", "math.h"])
        if st:
            _add_decl(f)

    return priv, pub
def check_types(config_cmd, ext, build_dir):
    """Determine the sizes of the fundamental C and Python types.

    Returns (private_defines, public_defines): (name, value) tuples for
    config.h and _numpyconfig.h respectively.  Raises SystemError when a
    size check fails and RuntimeError when CHAR_BIT is unavailable.
    """
    private_defines = []
    public_defines = []

    # Expected size (in number of bytes) for each type. This is an
    # optimization: those are only hints, and an exhaustive search for the size
    # is done if the hints are wrong.
    expected = {}
    expected['short'] = [2]
    expected['int'] = [4]
    expected['long'] = [8, 4]
    expected['float'] = [4]
    expected['double'] = [8]
    expected['long double'] = [16, 12, 8]
    expected['Py_intptr_t'] = [8, 4]
    expected['PY_LONG_LONG'] = [8]
    expected['long long'] = [8]
    expected['off_t'] = [8, 4]

    # Check we have the python header (-dev* packages on Linux)
    result = config_cmd.check_header('Python.h')
    if not result:
        raise SystemError(
            "Cannot compile 'Python.h'. Perhaps you need to "
            "install python-dev|python-devel.")
    res = config_cmd.check_header("endian.h")
    if res:
        private_defines.append(('HAVE_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_ENDIAN_H', 1))

    # Check basic types sizes: prefer the SIZEOF_* value that Python's own
    # configure determined; only measure ourselves when it is absent.
    for type in ('short', 'int', 'long'):
        res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
        if res:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
        else:
            res = config_cmd.check_type_size(type, expected=expected[type])
            if res >= 0:
                public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            else:
                raise SystemError("Checking sizeof (%s) failed !" % type)

    for type in ('float', 'double', 'long double'):
        already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
                                                 headers=["Python.h"])
        res = config_cmd.check_type_size(type, expected=expected[type])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            if not already_declared and not type == 'long double':
                private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

        # Compute size of corresponding complex type: used to check that our
        # definition is binary compatible with C99 complex type (check done at
        # build time in npy_common.h)
        complex_def = "struct {%s __x; %s __y;}" % (type, type)
        res = config_cmd.check_type_size(complex_def,
                                         expected=[2 * x for x in expected[type]])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % complex_def)

    for type in ('Py_intptr_t', 'off_t'):
        res = config_cmd.check_type_size(type, headers=["Python.h"],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected[type])
        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

    # We check declaration AND type because that's how distutils does it.
    if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
        res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected['PY_LONG_LONG'])
        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')

        res = config_cmd.check_type_size('long long',
                                         expected=expected['long long'])
        if res >= 0:
            #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'long long')

    if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
        raise RuntimeError(
            "Config wo CHAR_BIT is not supported"
            ", please contact the maintainers")

    return private_defines, public_defines
def check_mathlib(config_cmd):
    """Find the libraries needed to link against the C math functions.

    Candidates are tried in order: the comma-separated MATHLIB
    environment variable (if set), then no extra library, -lm, and
    -lcpml.  The first candidate with which "exp" links is returned;
    EnvironmentError is raised when none works.
    """
    candidates = [[], ['m'], ['cpml']]
    user_mathlib = os.environ.get('MATHLIB')
    if user_mathlib:
        candidates.insert(0, user_mathlib.split(','))
    for libs in candidates:
        if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
            return libs
    raise EnvironmentError("math library missing; rerun "
                           "setup.py after setting the "
                           "MATHLIB env variable")
def visibility_define(config):
    """Return the value used for the NPY_VISIBILITY_HIDDEN define (an
    empty string when the compiler lacks gcc4-style visibility)."""
    if not config.check_compiler_gcc4():
        return ''
    return '__attribute__((visibility("hidden")))'
def configuration(parent_package='',top_path=None):
    """Build the numpy.core configuration.

    Sets up the generated headers (config.h, _numpyconfig.h), the
    generated API and umath C sources, the npymath/npysort libraries and
    the multiarray/umath extension modules, plus the test extensions.
    """
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    # For released versions, an API-version mismatch is a hard error.
    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    # Shared memoizer: config.h and _numpyconfig.h reuse the same checks.
    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        # Build-time hook: generate <build>/include/numpy/config.h
        # (private defines) and record MATHLIB on the extension.
        target = join(build_dir, header_dir, 'config.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support.
            # NOTE(review): this binds a *local* name (no ``global``), so
            # the module-level PYTHON_HAS_UNICODE_WIDE is never updated
            # and the result is unused here -- TODO confirm intent.
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Get long double representation
            if sys.platform != 'darwin':
                rep = check_long_double_representation(config_cmd)
                if rep in ['INTEL_EXTENDED_12_BYTES_LE',
                           'INTEL_EXTENDED_16_BYTES_LE',
                           'MOTOROLA_EXTENDED_12_BYTES_BE',
                           'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
                           'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
                           'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
                    moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
                else:
                    raise ValueError("Unrecognized long double format: %s" % rep)

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            # (string entries become valueless #defines, tuples get values)
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")

            target_f.close()
            # Echo the generated header into the build log for debugging.
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            # Header is up to date: recover MATHLIB from the existing file
            # so the extension still links against the right libraries.
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put private include directory in build_dir on search path
        # allows using code generation in headers
        config.add_include_dirs(join(build_dir, "src", "private"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        # Return a build hook that runs code_generators/<module_name>.py
        # and registers the generated header/doc files as data files.
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]

            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'npymath'))
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))
    config.add_include_dirs(join('src', 'npysort'))

    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
    config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
    config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('src', 'npymath', '_signbit.c'),
            join('include', 'numpy', '*object.h'),
            join(codegen_dir, 'genapi.py'),
            ]

    #######################################################################
    #                          dummy module                               #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension('_dummy',
                         sources=[join('src', 'dummymodule.c'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api]
                         )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
                       join('src', 'npymath', 'ieee754.c.src'),
                       join('src', 'npymath', 'npy_math_complex.c.src'),
                       join('src', 'npymath', 'halffloat.c')
                       ]
    config.add_installed_library('npymath',
                                 sources=npymath_sources + [get_mathlib_info],
                                 install_dir='lib')
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
                              subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
                              subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
                       join('src', 'npysort', 'mergesort.c.src'),
                       join('src', 'npysort', 'heapsort.c.src'),
                       join('src', 'private', 'npy_partition.h.src'),
                       join('src', 'npysort', 'selection.c.src'),
                       join('src', 'private', 'npy_binsearch.h.src'),
                       join('src', 'npysort', 'binsearch.c.src'),
                       ]
    config.add_library('npysort',
                       sources=npysort_sources,
                       include_dirs=[])

    #######################################################################
    #                        multiarray module                            #
    #######################################################################

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src'),
                   join(local_dir, subpath, 'nditer_templ.c.src'),
                   join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
                   join(local_dir, subpath, 'einsum.c.src'),
                   join(local_dir, 'src', 'private', 'templ_common.h.src')
                   ]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    multiarray_deps = [
        join('src', 'multiarray', 'arrayobject.h'),
        join('src', 'multiarray', 'arraytypes.h'),
        join('src', 'multiarray', 'array_assign.h'),
        join('src', 'multiarray', 'buffer.h'),
        join('src', 'multiarray', 'calculation.h'),
        join('src', 'multiarray', 'cblasfuncs.h'),
        join('src', 'multiarray', 'common.h'),
        join('src', 'multiarray', 'convert_datatype.h'),
        join('src', 'multiarray', 'convert.h'),
        join('src', 'multiarray', 'conversion_utils.h'),
        join('src', 'multiarray', 'ctors.h'),
        join('src', 'multiarray', 'descriptor.h'),
        join('src', 'multiarray', 'getset.h'),
        join('src', 'multiarray', 'hashdescr.h'),
        join('src', 'multiarray', 'iterators.h'),
        join('src', 'multiarray', 'mapping.h'),
        join('src', 'multiarray', 'methods.h'),
        join('src', 'multiarray', 'multiarraymodule.h'),
        join('src', 'multiarray', 'nditer_impl.h'),
        join('src', 'multiarray', 'numpymemoryview.h'),
        join('src', 'multiarray', 'number.h'),
        join('src', 'multiarray', 'numpyos.h'),
        join('src', 'multiarray', 'refcount.h'),
        join('src', 'multiarray', 'scalartypes.h'),
        join('src', 'multiarray', 'sequence.h'),
        join('src', 'multiarray', 'shape.h'),
        join('src', 'multiarray', 'ucsnarrow.h'),
        join('src', 'multiarray', 'usertypes.h'),
        join('src', 'multiarray', 'vdot.h'),
        join('src', 'private', 'npy_config.h'),
        join('src', 'private', 'templ_common.h.src'),
        join('src', 'private', 'lowlevel_strided_loops.h'),
        join('src', 'private', 'mem_overlap.h'),
        join('src', 'private', 'npy_extint128.h'),
        join('include', 'numpy', 'arrayobject.h'),
        join('include', 'numpy', '_neighborhood_iterator_imp.h'),
        join('include', 'numpy', 'npy_endian.h'),
        join('include', 'numpy', 'arrayscalars.h'),
        join('include', 'numpy', 'noprefix.h'),
        join('include', 'numpy', 'npy_interrupt.h'),
        join('include', 'numpy', 'npy_3kcompat.h'),
        join('include', 'numpy', 'npy_math.h'),
        join('include', 'numpy', 'halffloat.h'),
        join('include', 'numpy', 'npy_common.h'),
        join('include', 'numpy', 'npy_os.h'),
        join('include', 'numpy', 'utils.h'),
        join('include', 'numpy', 'ndarrayobject.h'),
        join('include', 'numpy', 'npy_cpu.h'),
        join('include', 'numpy', 'numpyconfig.h'),
        join('include', 'numpy', 'ndarraytypes.h'),
        join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
        join('include', 'numpy', '_numpyconfig.h.in'),
        # add library sources as distuils does not consider libraries
        # dependencies
        ] + npysort_sources + npymath_sources

    multiarray_src = [
        join('src', 'multiarray', 'alloc.c'),
        join('src', 'multiarray', 'arrayobject.c'),
        join('src', 'multiarray', 'arraytypes.c.src'),
        join('src', 'multiarray', 'array_assign.c'),
        join('src', 'multiarray', 'array_assign_scalar.c'),
        join('src', 'multiarray', 'array_assign_array.c'),
        join('src', 'multiarray', 'buffer.c'),
        join('src', 'multiarray', 'calculation.c'),
        join('src', 'multiarray', 'compiled_base.c'),
        join('src', 'multiarray', 'common.c'),
        join('src', 'multiarray', 'convert.c'),
        join('src', 'multiarray', 'convert_datatype.c'),
        join('src', 'multiarray', 'conversion_utils.c'),
        join('src', 'multiarray', 'ctors.c'),
        join('src', 'multiarray', 'datetime.c'),
        join('src', 'multiarray', 'datetime_strings.c'),
        join('src', 'multiarray', 'datetime_busday.c'),
        join('src', 'multiarray', 'datetime_busdaycal.c'),
        join('src', 'multiarray', 'descriptor.c'),
        join('src', 'multiarray', 'dtype_transfer.c'),
        join('src', 'multiarray', 'einsum.c.src'),
        join('src', 'multiarray', 'flagsobject.c'),
        join('src', 'multiarray', 'getset.c'),
        join('src', 'multiarray', 'hashdescr.c'),
        join('src', 'multiarray', 'item_selection.c'),
        join('src', 'multiarray', 'iterators.c'),
        join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
        join('src', 'multiarray', 'mapping.c'),
        join('src', 'multiarray', 'methods.c'),
        join('src', 'multiarray', 'multiarraymodule.c'),
        join('src', 'multiarray', 'nditer_templ.c.src'),
        join('src', 'multiarray', 'nditer_api.c'),
        join('src', 'multiarray', 'nditer_constr.c'),
        join('src', 'multiarray', 'nditer_pywrap.c'),
        join('src', 'multiarray', 'number.c'),
        join('src', 'multiarray', 'numpymemoryview.c'),
        join('src', 'multiarray', 'numpyos.c'),
        join('src', 'multiarray', 'refcount.c'),
        join('src', 'multiarray', 'sequence.c'),
        join('src', 'multiarray', 'shape.c'),
        join('src', 'multiarray', 'scalarapi.c'),
        join('src', 'multiarray', 'scalartypes.c.src'),
        join('src', 'multiarray', 'usertypes.c'),
        join('src', 'multiarray', 'ucsnarrow.c'),
        join('src', 'multiarray', 'vdot.c'),
        join('src', 'private', 'templ_common.h.src'),
        join('src', 'private', 'mem_overlap.c'),
        ]

    blas_info = get_info('blas_opt', 0)
    if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
                               join('src', 'multiarray', 'python_xerbla.c'),
                               ])
        if uses_accelerate_framework(blas_info):
            multiarray_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    if not ENABLE_SEPARATE_COMPILATION:
        multiarray_deps.extend(multiarray_src)
        multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
        multiarray_src.append(generate_multiarray_templated_sources)

    config.add_extension('multiarray',
                         sources=multiarray_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  join(codegen_dir, 'generate_numpy_api.py'),
                                  join('*.py')],
                         depends=deps + multiarray_deps,
                         libraries=['npymath', 'npysort'],
                         extra_info=extra_info)

    #######################################################################
    #                           umath module                              #
    #######################################################################

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [
            join(local_dir, subpath, 'loops.h.src'),
            join(local_dir, subpath, 'loops.c.src'),
            join(local_dir, subpath, 'scalarmath.c.src'),
            join(local_dir, subpath, 'simd.inc.src')]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        # Regenerate __umath_generated.c from generate_umath.py when stale.
        target = join(build_dir, header_dir, '__umath_generated.c')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    umath_src = [
        join('src', 'umath', 'umathmodule.c'),
        join('src', 'umath', 'reduction.c'),
        join('src', 'umath', 'funcs.inc.src'),
        join('src', 'umath', 'simd.inc.src'),
        join('src', 'umath', 'loops.h.src'),
        join('src', 'umath', 'loops.c.src'),
        join('src', 'umath', 'ufunc_object.c'),
        join('src', 'umath', 'scalarmath.c.src'),
        join('src', 'umath', 'ufunc_type_resolution.c')]

    umath_deps = [
        generate_umath_py,
        join('src', 'multiarray', 'common.h'),
        join('src', 'private', 'templ_common.h.src'),
        join('src', 'umath', 'simd.inc.src'),
        join(codegen_dir, 'generate_ufunc_api.py'),
        join('src', 'private', 'ufunc_override.h')] + npymath_sources

    if not ENABLE_SEPARATE_COMPILATION:
        umath_deps.extend(umath_src)
        umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
        umath_src.append(generate_umath_templated_sources)
        umath_src.append(join('src', 'umath', 'funcs.inc.src'))
        umath_src.append(join('src', 'umath', 'simd.inc.src'))

    config.add_extension('umath',
                         sources=umath_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_umath_c,
                                  generate_ufunc_api],
                         depends=deps + umath_deps,
                         libraries=['npymath'],
                         )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension('umath_tests',
                         sources=[join('src', 'umath', 'umath_tests.c.src')])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension('test_rational',
                         sources=[join('src', 'umath', 'test_rational.c.src')])

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension('struct_ufunc_test',
                         sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension('multiarray_tests',
                         sources=[join('src', 'multiarray', 'multiarray_tests.c.src'),
                                  join('src', 'private', 'mem_overlap.c')],
                         depends=[join('src', 'private', 'mem_overlap.h'),
                                  join('src', 'private', 'npy_extint128.h')])

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension('operand_flag_tests',
                         sources=[join('src', 'umath', 'operand_flag_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
# Allow building numpy.core standalone: delegate to numpy.distutils' setup
# with the configuration factory defined above.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
| |
# pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import six
from src.test.py.bazel import test_base
class RunfilesTest(test_base.TestBase):
  """Integration tests for the runfiles libraries shipped in @bazel_tools.

  Each test stages a mock workspace from testdata/runfiles_test, builds
  binaries in the languages under test (Python, Java, Bash, C++), runs them,
  and verifies that each binary locates its data dependencies through its
  language's runfiles library.
  """

  def _AssertRunfilesLibraryInBazelToolsRepo(self, family, lang_name):
    """Builds and runs //foo:runfiles-<family>; checks runfiles discovery.

    Args:
      family: str; target suffix, one of "py", "java", "sh", "cc"
      lang_name: str; language name the binary echoes, e.g. "Python"
    """
    # Stage the mock workspace. *.mock files are copied under their real
    # names; sources that must be directly runnable are marked executable.
    for s, t, exe in [
        ("WORKSPACE.mock", "WORKSPACE", False),
        ("foo/BUILD.mock", "foo/BUILD", False),
        ("foo/foo.py", "foo/foo.py", True),
        ("foo/Foo.java", "foo/Foo.java", False),
        ("foo/foo.sh", "foo/foo.sh", True),
        ("foo/foo.cc", "foo/foo.cc", False),
        ("foo/datadep/hello.txt", "foo/datadep/hello.txt", False),
        ("bar/BUILD.mock", "bar/BUILD", False),
        ("bar/bar.py", "bar/bar.py", True),
        ("bar/bar-py-data.txt", "bar/bar-py-data.txt", False),
        ("bar/Bar.java", "bar/Bar.java", False),
        ("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
        ("bar/bar.sh", "bar/bar.sh", True),
        ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
        ("bar/bar.cc", "bar/bar.cc", False),
        ("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False)]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)
    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]
    # TODO(brandjon): (Issue #8169) Make this test compatible with Python
    # toolchains. Blocked on the fact that there's no PY3 environment on our Mac
    # workers (bazelbuild/continuous-integration#578).
    exit_code, _, stderr = self.RunBazel([
        "build",
        "--verbose_failures",
        "--incompatible_use_python_toolchains=false",
        "//foo:runfiles-" + family
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    if test_base.TestBase.IsWindows():
      bin_path = os.path.join(bazel_bin, "foo/runfiles-%s.exe" % family)
    else:
      bin_path = os.path.join(bazel_bin, "foo/runfiles-" + family)
    self.assertTrue(os.path.exists(bin_path))
    # The fake TEST_SRCDIR must never leak into any path the binary prints;
    # runfiles discovery must not rely on it.
    exit_code, stdout, stderr = self.RunProgram(
        [bin_path], env_add={"TEST_SRCDIR": "__ignore_me__"})
    self.AssertExitCode(exit_code, 0, stderr)
    # 10 output lines: 2 from foo-<family>, and 2 from each of bar-<lang>.
    if len(stdout) != 10:
      self.fail("stdout: %s" % stdout)
    self.assertEqual(stdout[0], "Hello %s Foo!" % lang_name)
    six.assertRegex(self, stdout[1], "^rloc=.*/foo/datadep/hello.txt")
    self.assertNotIn("__ignore_me__", stdout[1])
    with open(stdout[1].split("=", 1)[1], "r") as f:
      lines = [l.strip() for l in f.readlines()]
    if len(lines) != 1:
      self.fail("lines: %s" % lines)
    self.assertEqual(lines[0], "world")
    # foo's binary also runs each bar-<lang> binary; verify their output too.
    i = 2
    for lang in [("py", "Python", "bar.py"), ("java", "Java", "Bar.java"),
                 ("sh", "Bash", "bar.sh"), ("cc", "C++", "bar.cc")]:
      self.assertEqual(stdout[i], "Hello %s Bar!" % lang[1])
      six.assertRegex(self, stdout[i + 1],
                      "^rloc=.*/bar/bar-%s-data.txt" % lang[0])
      self.assertNotIn("__ignore_me__", stdout[i + 1])
      with open(stdout[i + 1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines(%s): %s" % (lang[0], lines))
      self.assertEqual(lines[0], "data for " + lang[2])
      i += 2

  def testPythonRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("py", "Python")

  def testJavaRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("java", "Java")

  def testBashRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("sh", "Bash")

  def testCppRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("cc", "C++")

  def testRunfilesLibrariesFindRunfilesWithoutEnvvars(self):
    """Binaries must find runfiles with all runfiles envvars scrubbed."""
    for s, t, exe in [
        ("WORKSPACE.mock", "WORKSPACE", False),
        ("bar/BUILD.mock", "bar/BUILD", False),
        ("bar/bar.py", "bar/bar.py", True),
        ("bar/bar-py-data.txt", "bar/bar-py-data.txt", False),
        ("bar/Bar.java", "bar/Bar.java", False),
        ("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
        ("bar/bar.sh", "bar/bar.sh", True),
        ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
        ("bar/bar.cc", "bar/bar.cc", False),
        ("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False),
    ]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)
    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]
    exit_code, _, stderr = self.RunBazel([
        "build", "--verbose_failures",
        "//bar:bar-py", "//bar:bar-java", "//bar:bar-sh", "//bar:bar-cc"
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    for lang in [("py", "Python", "bar.py"), ("java", "Java", "Bar.java"),
                 ("sh", "Bash", "bar.sh"), ("cc", "C++", "bar.cc")]:
      if test_base.TestBase.IsWindows():
        bin_path = os.path.join(bazel_bin, "bar/bar-%s.exe" % lang[0])
      else:
        bin_path = os.path.join(bazel_bin, "bar/bar-" + lang[0])
      self.assertTrue(os.path.exists(bin_path))
      # Remove every envvar a runfiles library may key off of; the binary
      # must still locate its runfiles relative to its own path.
      exit_code, stdout, stderr = self.RunProgram(
          [bin_path],
          env_remove=set([
              "RUNFILES_MANIFEST_FILE",
              "RUNFILES_MANIFEST_ONLY",
              "RUNFILES_DIR",
              "JAVA_RUNFILES",
          ]),
          env_add={"TEST_SRCDIR": "__ignore_me__"})
      self.AssertExitCode(exit_code, 0, stderr)
      if len(stdout) < 2:
        self.fail("stdout(%s): %s" % (lang[0], stdout))
      self.assertEqual(stdout[0], "Hello %s Bar!" % lang[1])
      six.assertRegex(self, stdout[1], "^rloc=.*/bar/bar-%s-data.txt" % lang[0])
      self.assertNotIn("__ignore_me__", stdout[1])
      with open(stdout[1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines(%s): %s" % (lang[0], lines))
      self.assertEqual(lines[0], "data for " + lang[2])

  def testRunfilesLibrariesFindRunfilesWithRunfilesManifestEnvvar(self):
    """Binaries must honor RUNFILES_MANIFEST_FILE when no tree exists."""
    for s, t, exe in [
        ("WORKSPACE.mock", "WORKSPACE", False),
        ("bar/BUILD.mock", "bar/BUILD", False),
        # Note: do not test Python here, because py_binary always needs a
        # runfiles tree, even on Windows, because it needs __init__.py files in
        # every directory where there may be importable modules, so Bazel always
        # needs to create a runfiles tree for py_binary.
        ("bar/Bar.java", "bar/Bar.java", False),
        ("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
        ("bar/bar.sh", "bar/bar.sh", True),
        ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
        ("bar/bar.cc", "bar/bar.cc", False),
        ("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False),
    ]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)
    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]
    for lang in [("java", "Java"), ("sh", "Bash"), ("cc", "C++")]:
      # Build without a runfiles tree so the binary must rely on the
      # runfiles manifest alone.
      exit_code, _, stderr = self.RunBazel([
          "build", "--verbose_failures", "--enable_runfiles=no",
          "//bar:bar-" + lang[0]
      ])
      self.AssertExitCode(exit_code, 0, stderr)
      if test_base.TestBase.IsWindows():
        bin_path = os.path.join(bazel_bin, "bar/bar-%s.exe" % lang[0])
      else:
        bin_path = os.path.join(bazel_bin, "bar/bar-" + lang[0])
      manifest_path = bin_path + ".runfiles_manifest"
      self.assertTrue(os.path.exists(bin_path))
      self.assertTrue(os.path.exists(manifest_path))
      # Create a copy of the runfiles manifest, replacing
      # "bar/bar-<lang>-data.txt" with a custom file.
      mock_bar_dep = self.ScratchFile("bar-%s-mockdata.txt" % lang[0],
                                      ["mock %s data" % lang[0]])
      if test_base.TestBase.IsWindows():
        # Runfiles manifests use forward slashes as path separators, even on
        # Windows.
        mock_bar_dep = mock_bar_dep.replace("\\", "/")
      manifest_key = "foo_ws/bar/bar-%s-data.txt" % lang[0]
      mock_manifest_line = manifest_key + " " + mock_bar_dep
      with open(manifest_path, "rt") as f:
        # Only rstrip newlines. Do not rstrip() completely, because that would
        # remove spaces too. This is necessary in order to have at least one
        # space in every manifest line.
        # Some manifest entries don't have any path after this space, namely the
        # "__init__.py" entries. (Bazel writes such manifests on every
        # platform). The reason is that these files are never symlinks in the
        # runfiles tree, Bazel actually creates empty __init__.py files (again
        # on every platform). However to keep these manifest entries correct,
        # they need to have a space character.
        # We could probably strip these lines completely, but this test doesn't
        # aim to exercise what would happen in that case.
        mock_manifest_data = [
            mock_manifest_line
            if line.split(" ", 1)[0] == manifest_key else line.rstrip("\n\r")
            for line in f
        ]
      substitute_manifest = self.ScratchFile(
          "mock-%s.runfiles/MANIFEST" % lang[0], mock_manifest_data)
      exit_code, stdout, stderr = self.RunProgram(
          [bin_path],
          env_remove=set(["RUNFILES_DIR"]),
          env_add={
              # On Linux/macOS, the Java launcher picks up JAVA_RUNFILES and
              # ignores RUNFILES_MANIFEST_FILE.
              "JAVA_RUNFILES": substitute_manifest[:-len("/MANIFEST")],
              # On Windows, the Java launcher picks up RUNFILES_MANIFEST_FILE.
              # The C++ runfiles library picks up RUNFILES_MANIFEST_FILE on all
              # platforms.
              "RUNFILES_MANIFEST_FILE": substitute_manifest,
              "RUNFILES_MANIFEST_ONLY": "1",
              "TEST_SRCDIR": "__ignore_me__",
          })
      self.AssertExitCode(exit_code, 0, stderr)
      if len(stdout) < 2:
        self.fail("stdout: %s" % stdout)
      self.assertEqual(stdout[0], "Hello %s Bar!" % lang[1])
      # The data dependency must resolve to the mock file injected through
      # the substitute manifest, not to the original data file.
      six.assertRegex(self, stdout[1], "^rloc=" + mock_bar_dep)
      self.assertNotIn("__ignore_me__", stdout[1])
      with open(stdout[1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines: %s" % lines)
      self.assertEqual(lines[0], "mock %s data" % lang[0])

  def testLegacyExternalRunfilesOption(self):
    """--nolegacy_external_runfiles must omit <main>/external/* entries."""
    self.ScratchDir("A")
    self.ScratchFile("A/WORKSPACE")
    self.ScratchFile("A/BUILD", [
        "py_library(",
        "  name = 'lib',",
        "  srcs = ['lib.py'],",
        "  visibility = ['//visibility:public'],",
        ")",
    ])
    self.ScratchFile("A/lib.py")
    work_dir = self.ScratchDir("B")
    self.ScratchFile("B/WORKSPACE",
                     ["local_repository(name = 'A', path='../A')"])
    self.ScratchFile("B/bin.py")
    self.ScratchFile("B/BUILD", [
        "py_binary(",
        "  name = 'bin',",
        "  srcs = ['bin.py'],",
        "  deps = ['@A//:lib'],",
        ")",
        "",
        "genrule(",
        "  name = 'gen',",
        "  outs = ['output'],",
        "  cmd = 'echo $(location //:bin) > $@',",
        "  tools = ['//:bin'],",
        ")",
    ])
    exit_code, stdout, stderr = self.RunBazel(
        args=["info", "output_path"], cwd=work_dir)
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_output = stdout[0]
    exit_code, _, stderr = self.RunBazel(
        args=["build", "--nolegacy_external_runfiles", ":gen"], cwd=work_dir)
    self.AssertExitCode(exit_code, 0, stderr)
    if self.IsWindows():
      manifest_path = os.path.join(bazel_output,
                                   "host/bin/bin.exe.runfiles_manifest")
    else:
      manifest_path = os.path.join(bazel_output,
                                   "host/bin/bin.runfiles_manifest")
    # With the flag set, the external repo must not be mirrored under
    # __main__/external in the runfiles manifest.
    self.AssertFileContentNotContains(manifest_path, "__main__/external/A")
if __name__ == "__main__":
  # Run all RunfilesTest cases via the standard unittest CLI runner.
  unittest.main()
| |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import pycassa
from pycassa import ColumnFamily
from pycassa.batch import Mutator
from pycassa.system_manager import SystemManager, SIMPLE_STRATEGY
from pycassa.pool import AllServersUnavailable
import gevent
from vnc_api import vnc_api
from exceptions import NoIdError, DatabaseUnavailableError
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
ConnectionType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
import time
from cfgm_common import jsonutils as json
import utils
import functools
import datetime
import re
from operator import itemgetter
class VncCassandraClient(object):
# Name to ID mapping keyspace + tables
_UUID_KEYSPACE_NAME = 'config_db_uuid'
# TODO describe layout
_OBJ_UUID_CF_NAME = 'obj_uuid_table'
# TODO describe layout
_OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
_MAX_COL = 10000000
@classmethod
def get_db_info(cls):
db_info = [(cls._UUID_KEYSPACE_NAME, [cls._OBJ_UUID_CF_NAME,
cls._OBJ_FQ_NAME_CF_NAME])]
return db_info
# end get_db_info
def __init__(self, server_list, db_prefix, keyspaces, logger,
generate_url=None, reset_config=[], credential=None):
self._re_match_parent = re.compile('parent:')
self._re_match_prop = re.compile('prop:')
self._re_match_ref = re.compile('ref:')
self._re_match_backref = re.compile('backref:')
self._re_match_children = re.compile('children:')
# bind CRUDL methods to all types
for resource_type in vnc_api.all_resource_types:
obj_type = resource_type.replace('-', '_')
for oper in ('create', 'read', 'update', 'delete', 'list',
'count_children'):
method = getattr(self, '_object_%s' %(oper))
bound_method = functools.partial(method, resource_type)
functools.update_wrapper(bound_method, method)
setattr(self, '_cassandra_%s_%s' %(obj_type, oper),
bound_method)
self._reset_config = reset_config
self._cache_uuid_to_fq_name = {}
if db_prefix:
self._db_prefix = '%s_' %(db_prefix)
else:
self._db_prefix = ''
self._server_list = server_list
self._num_dbnodes = len(self._server_list)
self._conn_state = ConnectionStatus.INIT
self._logger = logger
self._credential = credential
# if no generate_url is specified, use a dummy function that always
# returns an empty string
self._generate_url = generate_url or (lambda x,y: '')
self._cf_dict = {}
self._keyspaces = {
self._UUID_KEYSPACE_NAME: [(self._OBJ_UUID_CF_NAME, None),
(self._OBJ_FQ_NAME_CF_NAME, None)]}
if keyspaces:
self._keyspaces.update(keyspaces)
self._cassandra_init(server_list)
self._cache_uuid_to_fq_name = {}
self._obj_uuid_cf = self._cf_dict[self._OBJ_UUID_CF_NAME]
self._obj_fq_name_cf = self._cf_dict[self._OBJ_FQ_NAME_CF_NAME]
# end __init__
def _update_sandesh_status(self, status, msg=''):
ConnectionState.update(conn_type=ConnectionType.DATABASE,
name='Cassandra', status=status, message=msg,
server_addrs=self._server_list)
def _handle_exceptions(self, func):
def wrapper(*args, **kwargs):
try:
if self._conn_state != ConnectionStatus.UP:
# will set conn_state to UP if successful
self._cassandra_init_conn_pools()
return func(*args, **kwargs)
except AllServersUnavailable as e:
if self._conn_state != ConnectionStatus.DOWN:
self._update_sandesh_status(ConnectionStatus.DOWN)
msg = 'Cassandra connection down. Exception in %s' \
%(str(func))
self._logger(msg, level=SandeshLevel.SYS_ERR)
self._conn_state = ConnectionStatus.DOWN
raise DatabaseUnavailableError(
'Error, AllServersUnavailable: %s'
%(utils.detailed_traceback()))
return wrapper
# end _handle_exceptions
# Helper routines for cassandra
    def _cassandra_init(self, server_list):
        """Bring up the Cassandra layer: wrap pycassa entry points with
        exception handling, ensure keyspaces/CFs exist, and open pools.

        NOTE(review): this monkey-patches pycassa's ColumnFamily/Mutator
        methods process-wide; the wrappers close over *this* instance, so
        creating several VncCassandraClient objects re-wraps the methods —
        confirm single-instance usage is intended.
        """
        # 1. Ensure keyspace and schema/CFs exist
        # 2. Read in persisted data and publish to ifmap server
        self._update_sandesh_status(ConnectionStatus.INIT)
        ColumnFamily.get = self._handle_exceptions(ColumnFamily.get)
        ColumnFamily.multiget = self._handle_exceptions(ColumnFamily.multiget)
        ColumnFamily.xget = self._handle_exceptions(ColumnFamily.xget)
        ColumnFamily.get_range = self._handle_exceptions(ColumnFamily.get_range)
        ColumnFamily.insert = self._handle_exceptions(ColumnFamily.insert)
        ColumnFamily.remove = self._handle_exceptions(ColumnFamily.remove)
        Mutator.send = self._handle_exceptions(Mutator.send)
        # Keyspace names are created with the configured prefix applied.
        for ks,cf_list in self._keyspaces.items():
            keyspace = '%s%s' %(self._db_prefix, ks)
            self._cassandra_ensure_keyspace(server_list, keyspace, cf_list)
        self._cassandra_init_conn_pools()
    # end _cassandra_init
def _cassandra_system_manager(self):
# Retry till cassandra is up
server_idx = 0
connected = False
while not connected:
try:
cass_server = self._server_list[server_idx]
sys_mgr = SystemManager(cass_server, credentials=self._credential)
connected = True
except Exception:
# TODO do only for
# thrift.transport.TTransport.TTransportException
server_idx = (server_idx + 1) % self._num_dbnodes
time.sleep(3)
return sys_mgr
# end _cassandra_system_manager
    def _cassandra_ensure_keyspace(self, server_list,
                                   keyspace_name, cf_info_list):
        """Create *keyspace_name* and its column families if absent.

        If the keyspace is listed in self._reset_config it is dropped first.
        Creation errors (InvalidRequestException, presumably keyspace/CF
        already exists — see TODOs) are logged as warnings; for an existing
        CF the schema is altered to the desired settings instead.
        """
        sys_mgr = self._cassandra_system_manager()
        if keyspace_name in self._reset_config:
            try:
                sys_mgr.drop_keyspace(keyspace_name)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                # TODO verify only EEXISTS
                self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
        try:
            # Replication factor equals the number of configured DB nodes.
            sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
                                    {'replication_factor': str(self._num_dbnodes)})
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            # TODO verify only EEXISTS
            self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
        # gc_grace 0 for a single node, 60s for clusters — presumably to let
        # tombstones propagate between replicas; TODO confirm rationale.
        gc_grace_sec = 0
        if self._num_dbnodes > 1:
            gc_grace_sec = 60
        for cf_info in cf_info_list:
            try:
                (cf_name, comparator_type) = cf_info
                if comparator_type:
                    sys_mgr.create_column_family(
                        keyspace_name, cf_name,
                        comparator_type=comparator_type,
                        gc_grace_seconds=gc_grace_sec,
                        default_validation_class='UTF8Type')
                else:
                    sys_mgr.create_column_family(keyspace_name, cf_name,
                                                 gc_grace_seconds=gc_grace_sec,
                                                 default_validation_class='UTF8Type')
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                # TODO verify only EEXISTS
                self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
                # CF already exists: converge its settings instead.
                sys_mgr.alter_column_family(keyspace_name, cf_name,
                                            gc_grace_seconds=gc_grace_sec,
                                            default_validation_class='UTF8Type')
    # end _cassandra_ensure_keyspace
def _cassandra_init_conn_pools(self):
for ks,cf_list in self._keyspaces.items():
pool = pycassa.ConnectionPool(
ks, self._server_list, max_overflow=-1, use_threadlocal=True,
prefill=True, pool_size=20, pool_timeout=120,
max_retries=-1, timeout=5, credentials=self._credential)
rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
for (cf, _) in cf_list:
self._cf_dict[cf] = ColumnFamily(
pool, cf, read_consistency_level = rd_consistency,
write_consistency_level = wr_consistency)
ConnectionState.update(conn_type = ConnectionType.DATABASE,
name = 'Cassandra', status = ConnectionStatus.UP, message = '',
server_addrs = self._server_list)
self._conn_state = ConnectionStatus.UP
msg = 'Cassandra connection ESTABLISHED'
self._logger(msg, level=SandeshLevel.SYS_NOTICE)
# end _cassandra_init_conn_pools
def _get_resource_class(self, obj_type):
cls_name = '%s' %(utils.CamelCase(obj_type.replace('-', '_')))
return getattr(vnc_api, cls_name)
# end _get_resource_class
    def _object_create(self, res_type, obj_ids, obj_dict):
        """Persist a new object: write its row into obj_uuid_table and add an
        index entry to obj_fq_name_table.

        :param res_type: resource type name ('virtual-network' style)
        :param obj_ids: dict carrying at least 'uuid' of the new object
        :param obj_dict: API-style body (fq_name, optional parent_type,
            properties and *_refs lists)
        :returns: (True, '') on success
        """
        obj_type = res_type.replace('-', '_')
        obj_class = self._get_resource_class(obj_type)
        # Gather column values for obj and updates to backrefs
        # in a batch and write it at the end
        bch = self._obj_uuid_cf.batch()
        obj_cols = {}
        obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
        obj_cols['type'] = json.dumps(obj_type)
        if 'parent_type' in obj_dict:
            # non config-root child
            parent_type = obj_dict['parent_type']
            parent_method_type = parent_type.replace('-', '_')
            # parent fq_name is the object's fq_name minus its own last name
            parent_fq_name = obj_dict['fq_name'][:-1]
            obj_cols['parent_type'] = json.dumps(parent_type)
            parent_uuid = self.fq_name_to_uuid(parent_method_type, parent_fq_name)
            self._create_child(bch, parent_method_type, parent_uuid, obj_type, obj_ids['uuid'])
        # Properties
        for prop_field in obj_class.prop_fields:
            field = obj_dict.get(prop_field)
            if field is None:
                continue
            if prop_field == 'id_perms':
                # stamp creation time; last_modified starts equal to created
                field['created'] = datetime.datetime.utcnow().isoformat()
                field['last_modified'] = field['created']
            self._create_prop(bch, obj_ids['uuid'], prop_field, field)
        # References
        # e.g. ref_field = 'network_ipam_refs'
        #      ref_type = 'network-ipam'
        #      ref_link_type = 'VnSubnetsType'
        #      is_weakref = False
        for ref_field in obj_class.ref_fields:
            ref_type, ref_link_type, _ = obj_class.ref_field_types[ref_field]
            refs = obj_dict.get(ref_field, [])
            for ref in refs:
                ref_uuid = self.fq_name_to_uuid(ref_type, ref['to'])
                ref_attr = ref.get('attr')
                # refs created here are always strong (is_weakref False)
                ref_data = {'attr': ref_attr, 'is_weakref': False}
                self._create_ref(bch, obj_type, obj_ids['uuid'],
                                 ref_type.replace('-', '_'), ref_uuid, ref_data)
        bch.insert(obj_ids['uuid'], obj_cols)
        bch.send()
        # Update fqname table
        fq_name_str = ':'.join(obj_dict['fq_name'])
        fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_ids['uuid']: json.dumps(None)}
        self._obj_fq_name_cf.insert(obj_type, fq_name_cols)
        return (True, '')
    # end _object_create
    def _object_read(self, res_type, obj_uuids, field_names=None):
        """Read one or more objects from obj_uuid_table.

        :param res_type: resource type name, dashed or underscored
        :param obj_uuids: list of uuids to read
        :param field_names: optional list of fields to return; None = all
        :returns: (True, [result-dict, ...]) on success, or
            (False, error-message) if a parent reference cannot be resolved
        :raises NoIdError: only when exactly one uuid was requested and no
            row was found (multi-uuid reads silently skip missing rows)
        """
        # if field_names=None, all fields will be read/returned
        obj_type = res_type.replace('-', '_')
        obj_class = self._get_resource_class(obj_type)
        obj_uuid_cf = self._obj_uuid_cf
        # optimize for common case of reading non-backref, non-children fields
        # ignoring columns starting from 'b' and 'c' - significant performance
        # impact in scaled setting. e.g. read of project
        if (field_names is None or
            (set(field_names) & (obj_class.backref_fields | obj_class.children_fields))):
            # atleast one backref/children field is needed
            obj_rows = obj_uuid_cf.multiget(obj_uuids,
                                            column_count=self._MAX_COL,
                                            include_timestamp=True)
        else: # ignore reading backref + children columns
            # 'd' skips 'backref:*' and 'children:*' in column sort order
            obj_rows = obj_uuid_cf.multiget(obj_uuids,
                                            column_start='d',
                                            column_count=self._MAX_COL,
                                            include_timestamp=True)
        if (len(obj_uuids) == 1) and not obj_rows:
            raise NoIdError(obj_uuids[0])
        results = []
        for row_key in obj_rows:
            obj_uuid = row_key
            obj_cols = obj_rows[obj_uuid]
            result = {}
            result['uuid'] = obj_uuid
            result['fq_name'] = json.loads(obj_cols['fq_name'][0])
            # Dispatch on column-name prefix: parent:, prop:, children:,
            # ref:, backref: (values are (json, timestamp) tuples).
            for col_name in obj_cols.keys():
                if self._re_match_parent.match(col_name):
                    # non config-root child
                    (_, _, parent_uuid) = col_name.split(':')
                    parent_type = json.loads(obj_cols['parent_type'][0])
                    result['parent_type'] = parent_type
                    try:
                        result['parent_uuid'] = parent_uuid
                        result['parent_href'] = self._generate_url(parent_type, parent_uuid)
                    except NoIdError:
                        err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
                        return (False, err_msg)
                if self._re_match_prop.match(col_name):
                    (_, prop_name) = col_name.split(':')
                    result[prop_name] = json.loads(obj_cols[col_name][0])
                if self._re_match_children.match(col_name):
                    (_, child_type, child_uuid) = col_name.split(':')
                    # child field name in results is the pluralized type
                    if field_names and '%ss' %(child_type) not in field_names:
                        continue
                    child_tstamp = obj_cols[col_name][1]
                    try:
                        self._read_child(result, obj_uuid, child_type, child_uuid, child_tstamp)
                    except NoIdError:
                        # stale child entry: skip it
                        continue
                if self._re_match_ref.match(col_name):
                    (_, ref_type, ref_uuid) = col_name.split(':')
                    self._read_ref(result, obj_uuid, ref_type, ref_uuid, obj_cols[col_name][0])
                if self._re_match_backref.match(col_name):
                    (_, back_ref_type, back_ref_uuid) = col_name.split(':')
                    if field_names and '%s_back_refs' %(back_ref_type) not in field_names:
                        continue
                    try:
                        self._read_back_ref(result, obj_uuid, back_ref_type, back_ref_uuid,
                                            obj_cols[col_name][0])
                    except NoIdError:
                        continue
            # for all column names
            # sort children by creation time
            for child_field in obj_class.children_fields:
                if child_field not in result:
                    continue
                sorted_children = sorted(result[child_field],
                                         key = itemgetter('tstamp'))
                # re-write result's children without timestamp
                result[child_field] = sorted_children
                [child.pop('tstamp') for child in result[child_field]]
            # for all children
            results.append(result)
        # end for all rows
        return (True, results)
    # end _object_read
def _object_count_children(self, res_type, obj_uuid, child_type):
if child_type is None:
return (False, '')
obj_type = res_type.replace('-', '_')
obj_class = self._get_resource_class(obj_type)
obj_uuid_cf = self._obj_uuid_cf
if child_type not in obj_class.children_fields:
return (False,
'%s is not a child type of %s' %(child_type, obj_type))
col_start = 'children:'+child_type[:-1]+':'
col_finish = 'children:'+child_type[:-1]+';'
num_children = obj_uuid_cf.get_count(obj_uuid,
column_start=col_start,
column_finish=col_finish,
max_count=self._MAX_COL)
return (True, num_children)
# end _object_count_children
    def _object_update(self, res_type, obj_uuid, new_obj_dict):
        """Update an existing object's properties and references in place.

        Existing columns are walked once: id_perms is always refreshed (for
        the last-modified timestamp), changed properties and refs are
        updated, and any entries remaining in new_props/new_ref_infos
        afterwards are created fresh.

        NOTE(review): this relies on _update_prop/_update_ref (defined
        elsewhere) removing handled entries from new_props/new_ref_infos so
        the trailing create loops only see genuinely new items — confirm.

        :returns: (True, '') on success
        """
        obj_type = res_type.replace('-', '_')
        obj_class = self._get_resource_class(obj_type)
        # Grab ref-uuids and properties in new version
        new_ref_infos = {}
        # Properties
        new_props = {}
        for prop_field in obj_class.prop_fields:
            if prop_field in new_obj_dict:
                new_props[prop_field] = new_obj_dict[prop_field]
        # References
        # e.g. ref_field = 'network_ipam_refs'
        #      ref_type = 'network-ipam'
        #      ref_link_type = 'VnSubnetsType'
        #      is_weakref = False
        for ref_field in obj_class.ref_fields:
            ref_type, ref_link_type, is_weakref = \
                obj_class.ref_field_types[ref_field]
            ref_obj_type = ref_type.replace('-', '_')
            if ref_field in new_obj_dict:
                new_refs = new_obj_dict[ref_field]
                new_ref_infos[ref_obj_type] = {}
                for new_ref in new_refs or []:
                    new_ref_uuid = self.fq_name_to_uuid(ref_type, new_ref['to'])
                    new_ref_attr = new_ref.get('attr')
                    new_ref_data = {'attr': new_ref_attr, 'is_weakref': is_weakref}
                    new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
        # Gather column values for obj and updates to backrefs
        # in a batch and write it at the end
        obj_uuid_cf = self._obj_uuid_cf
        obj_cols_iter = obj_uuid_cf.xget(obj_uuid)
        # TODO optimize this (converts tuple to dict)
        obj_cols = {}
        for col_info in obj_cols_iter:
            obj_cols[col_info[0]] = col_info[1]
        bch = obj_uuid_cf.batch()
        for col_name in obj_cols.keys():
            if re.match('prop:', col_name):
                (_, prop_name) = col_name.split(':')
                if prop_name == 'id_perms':
                    # id-perms always has to be updated for last-mod timestamp
                    # get it from request dict(or from db if not in request dict)
                    new_id_perms = new_obj_dict.get(prop_name, json.loads(obj_cols[col_name]))
                    self.update_last_modified(bch, obj_uuid, new_id_perms)
                elif prop_name in new_obj_dict:
                    self._update_prop(bch, obj_uuid, prop_name, new_props)
            if re.match('ref:', col_name):
                (_, ref_type, ref_uuid) = col_name.split(':')
                self._update_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid, new_ref_infos)
        # for all column names
        # create new refs
        for ref_type in new_ref_infos.keys():
            for ref_uuid in new_ref_infos[ref_type].keys():
                ref_data = new_ref_infos[ref_type][ref_uuid]
                self._create_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid, ref_data)
        # create new props
        for prop_name in new_props.keys():
            self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
        bch.send()
        return (True, '')
    # end _object_update
    def _object_list(self, res_type, parent_uuids=None, back_ref_uuids=None,
                     obj_uuids=None, count=False, filters=None):
        """List (fq_name, uuid) pairs of objects of *res_type*.

        Anchors, applied in this order (later anchors overwrite earlier
        results when several are given): children of parent_uuids, backrefs
        of back_ref_uuids, the explicit obj_uuids list, or — with no anchor —
        every object of the type from obj_fq_name_table.

        :param count: when True return (True, <int count>) instead of pairs
        :param filters: optional {'field_names': [...], 'field_values': [...]}
            requiring exact matches on 'prop:<name>' column values
        :returns: (True, [(fq_name, uuid), ...]) or (True, count)
        """
        obj_type = res_type.replace('-', '_')
        obj_class = self._get_resource_class(obj_type)
        children_fq_names_uuids = []
        if filters:
            fnames = filters.get('field_names', [])
            fvalues = filters.get('field_values', [])
            filter_fields = [(fnames[i], fvalues[i]) for i in range(len(fnames))]
        else:
            filter_fields = []
        # NOTE(review): filter_rows closes over obj_uuid_cf, which is only
        # assigned further down in each anchor branch before use.
        def filter_rows(coll_infos, filter_cols, filter_params):
            # keep only rows whose 'prop:<fname>' columns equal the wanted
            # values exactly
            filt_infos = {}
            coll_rows = obj_uuid_cf.multiget(coll_infos.keys(),
                                             columns=filter_cols,
                                             column_count=self._MAX_COL)
            for row in coll_rows:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                full_match = True
                for fname, fval in filter_params:
                    if coll_rows[row]['prop:%s' %(fname)] != fval:
                        full_match = False
                        break
                if full_match:
                    filt_infos[row] = coll_infos[row]
            return filt_infos
        # end filter_rows
        def get_fq_name_uuid_list(obj_uuids):
            # resolve uuids to (fq_name, uuid); unknown uuids are skipped
            ret_list = []
            for obj_uuid in obj_uuids:
                try:
                    obj_fq_name = self.uuid_to_fq_name(obj_uuid)
                    ret_list.append((obj_fq_name, obj_uuid))
                except NoIdError:
                    pass
            return ret_list
        # end get_fq_name_uuid_list
        if parent_uuids:
            # go from parent to child
            obj_uuid_cf = self._obj_uuid_cf
            # ';' is the char after ':': bounds the 'children:<type>:*' range
            col_start = 'children:%s:' %(obj_type)
            col_fin = 'children:%s;' %(obj_type)
            try:
                obj_rows = obj_uuid_cf.multiget(parent_uuids,
                                                column_start=col_start,
                                                column_finish=col_fin,
                                                column_count=self._MAX_COL,
                                                include_timestamp=True)
            except pycassa.NotFoundException:
                if count:
                    return (True, 0)
                else:
                    return (True, children_fq_names_uuids)
            def filter_rows_parent_anchor(sort=False):
                # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
                all_cols = [cols for obj_key in obj_rows.keys()
                            for cols in obj_rows[obj_key].items()]
                all_child_infos = {}
                for col_name, col_val_ts in all_cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    child_uuid = col_name.split(':')[2]
                    if obj_uuids and child_uuid not in obj_uuids:
                        continue
                    all_child_infos[child_uuid] = {'uuid': child_uuid, 'tstamp': col_val_ts[1]}
                filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
                if filter_cols:
                    filt_child_infos = filter_rows(all_child_infos, filter_cols, filter_fields)
                else: # no filter specified
                    filt_child_infos = all_child_infos
                if not sort:
                    ret_child_infos = filt_child_infos.values()
                else:
                    # sort=True orders children by creation timestamp
                    ret_child_infos = sorted(filt_child_infos.values(), key=itemgetter('tstamp'))
                return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
            # end filter_rows_parent_anchor
            if count:
                return (True, len(filter_rows_parent_anchor()))
            children_fq_names_uuids = filter_rows_parent_anchor(sort=True)
        if back_ref_uuids:
            # go from anchor to backrefs
            obj_uuid_cf = self._obj_uuid_cf
            col_start = 'backref:%s:' %(obj_type)
            col_fin = 'backref:%s;' %(obj_type)
            try:
                obj_rows = obj_uuid_cf.multiget(back_ref_uuids,
                                                column_start=col_start,
                                                column_finish=col_fin,
                                                column_count=self._MAX_COL,
                                                include_timestamp=True)
            except pycassa.NotFoundException:
                if count:
                    return (True, 0)
                else:
                    return (True, children_fq_names_uuids)
            def filter_rows_backref_anchor():
                # flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *]
                all_cols = [cols for obj_key in obj_rows.keys()
                            for cols in obj_rows[obj_key].items()]
                all_backref_infos = {}
                for col_name, col_val_ts in all_cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    backref_uuid = col_name.split(':')[2]
                    if obj_uuids and backref_uuid not in obj_uuids:
                        continue
                    all_backref_infos[backref_uuid] = \
                        {'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
                filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
                if filter_cols:
                    filt_backref_infos = filter_rows(
                        all_backref_infos, filter_cols, filter_fields)
                else: # no filter specified
                    filt_backref_infos = all_backref_infos
                return get_fq_name_uuid_list(r['uuid'] for r in filt_backref_infos.values())
            # end filter_rows_backref_anchor
            if count:
                return (True, len(filter_rows_backref_anchor()))
            children_fq_names_uuids = filter_rows_backref_anchor()
        if not parent_uuids and not back_ref_uuids:
            obj_uuid_cf = self._obj_uuid_cf
            if obj_uuids:
                # exact objects specified
                def filter_rows_object_list():
                    all_obj_infos = {}
                    for obj_uuid in obj_uuids:
                        all_obj_infos[obj_uuid] = None
                    filter_cols = ['prop:%s' %(fname)
                                   for fname, _ in filter_fields]
                    if filter_cols:
                        filt_obj_infos = filter_rows(
                            all_obj_infos, filter_cols, filter_fields)
                    else: # no filters specified
                        filt_obj_infos = all_obj_infos
                    return get_fq_name_uuid_list(filt_obj_infos.keys())
                # end filter_rows_object_list
                if count:
                    return (True, len(filter_rows_object_list()))
                children_fq_names_uuids = filter_rows_object_list()
            else: # grab all resources of this type
                obj_fq_name_cf = self._obj_fq_name_cf
                try:
                    cols = obj_fq_name_cf.get('%s' %(obj_type),
                                              column_count=self._MAX_COL)
                except pycassa.NotFoundException:
                    if count:
                        return (True, 0)
                    else:
                        return (True, children_fq_names_uuids)
                def filter_rows_no_anchor():
                    # fq_name index column names are '<fq-name>:<uuid>'
                    all_obj_infos = {}
                    for col_name, col_val in cols.items():
                        # give chance for zk heartbeat/ping
                        gevent.sleep(0)
                        col_name_arr = utils.decode_string(col_name).split(':')
                        obj_uuid = col_name_arr[-1]
                        all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
                    filter_cols = ['prop:%s' %(fname) for fname, _ in filter_fields]
                    if filter_cols:
                        filt_obj_infos = filter_rows(all_obj_infos, filter_cols, filter_fields)
                    else: # no filters specified
                        filt_obj_infos = all_obj_infos
                    return filt_obj_infos.values()
                # end filter_rows_no_anchor
                if count:
                    return (True, len(filter_rows_no_anchor()))
                children_fq_names_uuids = filter_rows_no_anchor()
        return (True, children_fq_names_uuids)
    # end _object_list
    def _object_delete(self, res_type, obj_uuid):
        """Delete an object from cassandra and clean up all its linkage.

        Removes the object's row from the uuid table (in one batch together
        with the parent-link and ref-link cleanup) and then drops its entry
        from the fqname table.

        :param res_type: REST resource type, hyphenated (e.g. 'virtual-network')
        :param obj_uuid: uuid of the object to delete
        :returns: (True, '') on success
        """
        obj_type = res_type.replace('-', '_')
        obj_class = self._get_resource_class(obj_type)
        obj_uuid_cf = self._obj_uuid_cf
        # fq_name must be read before the row is removed; it is needed at the
        # end to locate the entry in the fqname table.
        fq_name = json.loads(
            obj_uuid_cf.get(obj_uuid, columns=['fq_name'])['fq_name'])
        bch = obj_uuid_cf.batch()
        # unlink from parent
        # 'parent;' is the first string after the 'parent:' prefix range, so
        # (col_start, col_fin) selects exactly the parent:* columns.
        col_start = 'parent:'
        col_fin = 'parent;'
        col_name_iter = obj_uuid_cf.xget(
            obj_uuid, column_start=col_start, column_finish=col_fin)
        for (col_name, col_val) in col_name_iter:
            (_, parent_type, parent_uuid) = col_name.split(':')
            self._delete_child(
                bch, parent_type, parent_uuid, obj_type, obj_uuid)
        # remove refs
        col_start = 'ref:'
        col_fin = 'ref;'
        col_name_iter = obj_uuid_cf.xget(
            obj_uuid, column_start=col_start, column_finish=col_fin)
        for (col_name, col_val) in col_name_iter:
            (_, ref_type, ref_uuid) = col_name.split(':')
            self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
        # drop the whole object row and flush the batch atomically
        bch.remove(obj_uuid)
        bch.send()
        # Update fqname table
        fq_name_str = ':'.join(fq_name)
        fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
        self._obj_fq_name_cf.remove(obj_type, columns = [fq_name_col])
        return (True, '')
    # end _object_delete
def read(self, method_name, *args, **kwargs):
method = getattr(self, '_cassandra_%s_read' % (method_name))
return method(*args, **kwargs)
# end read
def count_children(self, method_name, *args, **kwargs):
method = getattr(self, '_cassandra_%s_count_children' % (method_name))
return method(*args, **kwargs)
# end count_children
def list(self, method_name, *args, **kwargs):
method = getattr(self, '_cassandra_%s_list' % (method_name))
return method(*args, **kwargs)
# end list
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
try:
del self._cache_uuid_to_fq_name[id]
except KeyError:
pass
# end cache_uuid_to_fq_name_del
def uuid_to_fq_name(self, id):
try:
return self._cache_uuid_to_fq_name[id][0]
except KeyError:
try:
obj = self._obj_uuid_cf.get(id, columns=['fq_name', 'type'])
except pycassa.NotFoundException:
raise NoIdError(id)
fq_name = json.loads(obj['fq_name'])
obj_type = json.loads(obj['type'])
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return fq_name
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
try:
return self._cache_uuid_to_fq_name[id][1]
except KeyError:
try:
obj = self._obj_uuid_cf.get(id, columns=['fq_name', 'type'])
except pycassa.NotFoundException:
raise NoIdError(id)
fq_name = json.loads(obj['fq_name'])
obj_type = json.loads(obj['type'])
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return obj_type
# end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
method_name = obj_type.replace('-', '_')
fq_name_str = ':'.join(fq_name)
col_start = '%s:' % (utils.encode_string(fq_name_str))
col_fin = '%s;' % (utils.encode_string(fq_name_str))
try:
col_info_iter = self._obj_fq_name_cf.xget(
method_name, column_start=col_start, column_finish=col_fin)
except pycassa.NotFoundException:
raise NoIdError('%s %s' % (obj_type, fq_name))
col_infos = list(col_info_iter)
if len(col_infos) == 0:
raise NoIdError('%s %s' % (obj_type, fq_name))
for (col_name, col_val) in col_infos:
obj_uuid = col_name.split(':')[-1]
return obj_uuid
# end fq_name_to_uuid
def _read_child(self, result, obj_uuid, child_type,
child_uuid, child_tstamp):
if '%ss' % (child_type) not in result:
result['%ss' % (child_type)] = []
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['href'] = self._generate_url(child_type, child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_type) not in result:
result['%s_refs' % (ref_type)] = []
ref_data = json.loads(ref_data_json)
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['href'] = self._generate_url(ref_type, ref_uuid)
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_type,
back_ref_uuid, back_ref_data_json):
if '%s_back_refs' % (back_ref_type) not in result:
result['%s_back_refs' % (back_ref_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = json.loads(back_ref_data_json)
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['href'] = self._generate_url(back_ref_type, back_ref_uuid)
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_type)].append(back_ref_info)
# end _read_back_ref
| |
#!/usr/bin/env python
## category General
## desc Update read names with a suffix (for merging)
'''
Tags each read in a BAM file
Currently supported tags:
-suffix
-xs
-orig-ref
-orig-pos
-orig-cigar
-junction
'''
import sys
import os
from ngsutils.bam import bam_iter, cigar_tostr
import pysam
class BamWriter(object):
    '''
    Writes the filtered reads to a new BAM file.

    Output first goes to a hidden temp file next to the target so a failed
    run never leaves a truncated file at the final name; the temp file is
    renamed over the target only after the whole chain has been consumed.
    '''
    def __init__(self, outname, infname):
        self.outname = outname
        self.inbam = pysam.Samfile(infname, "rb")

    def run_chain(self, chain):
        '''Run every read through the filter chain and commit the output BAM.'''
        tmp = os.path.join(os.path.dirname(self.outname), '.tmp.%s' % os.path.basename(self.outname))
        outbam = pysam.Samfile(tmp, 'wb', template=self.inbam)
        for read in chain.filter(self.inbam):
            outbam.write(read)
        self.inbam.close()
        outbam.close()

        # Bug fix: this method previously referenced the module-global
        # 'outfname' instead of self.outname, so it only worked by accident
        # when invoked from the __main__ block of this script.
        if os.path.exists(self.outname):
            os.unlink(self.outname)  # Not really needed on *nix
        os.rename(tmp, self.outname)
class BamReader(object):
    '''Head of a filter chain: yields every read of the BAM file unchanged.'''
    def filter(self, bam):
        for aligned_read in bam_iter(bam):
            yield aligned_read
class Suffix(object):
    '''Appends a fixed suffix to every read name (e.g. before merging BAMs).'''
    def __init__(self, parent, suffix):
        self.parent = parent
        self.suffix = suffix

    def filter(self, bam):
        for read in self.parent.filter(bam):
            read.qname = read.qname + self.suffix
            yield read
class PredictJunction(object):
    '''
    Tags reads with splice junctions inferred from the CIGAR alignment.

    Each ref-skip ('N', op 3) produces a "ref:start>end" entry; matches (op 0)
    and deletions (op 2) advance the reference position. NOTE(review): the
    skip itself does not advance the tracked position here -- preserved from
    the original implementation; confirm against upstream ngsutils.
    '''
    def __init__(self, parent, tag):
        self.parent = parent
        self.tag = tag

    def filter(self, bam):
        for read in self.parent.filter(bam):
            refpos = read.pos
            spans = []
            for op, size in read.cigar:
                if op in (0, 2):
                    refpos += size
                elif op == 3:
                    spans.append('%s:%s>%s' % (bam.references[read.tid], refpos, refpos + size))
            if spans:
                read.tags = read.tags + [(self.tag, ','.join(spans))]
            yield read
class OrigRef(object):
    '''Stores the current reference name in a tag ('*' for unmapped reads).'''
    def __init__(self, parent, tag):
        self.parent = parent
        self.tag = tag

    def filter(self, bam):
        refnames = list(bam.references)
        for read in self.parent.filter(bam):
            value = '*' if read.is_unmapped else refnames[read.tid]
            read.tags = read.tags + [(self.tag, value)]
            yield read
class OrigPos(object):
    '''Stores the current alignment position in a tag (-1 for unmapped reads).'''
    def __init__(self, parent, tag):
        self.parent = parent
        self.tag = tag

    def filter(self, bam):
        for read in self.parent.filter(bam):
            value = -1 if read.is_unmapped else read.pos
            read.tags = read.tags + [(self.tag, value)]
            yield read
class OrigCIGAR(object):
    '''Stores the current CIGAR string in a tag ('*' for unmapped reads).'''
    def __init__(self, parent, tag):
        self.parent = parent
        self.tag = tag

    def filter(self, bam):
        for read in self.parent.filter(bam):
            if read.is_unmapped:
                read.tags = read.tags + [(self.tag, '*')]
            else:
                read.tags = read.tags + [(self.tag, cigar_tostr(read.cigar))]
            yield read
class Tag(object):
    '''
    Adds an arbitrary tag to every read, given as "KEY:TYPE:VALUE".

    A ':i' type converts the value to int and ':f' to float; anything else
    (including a bare "KEY:VALUE") is kept as a string.
    '''
    def __init__(self, parent, tag):
        self.parent = parent
        spl = tag.rsplit(':', 1)
        key = spl[0]
        type_suffix = key[-2:].lower()
        if type_suffix == ':i':
            self.value = int(spl[1])
        elif type_suffix == ':f':
            self.value = float(spl[1])
        else:
            self.value = spl[1]
        if ':' in key:
            key = key.split(':')[0]
        self.key = key

    def filter(self, bam):
        for read in self.parent.filter(bam):
            read.tags = read.tags + [(self.key, self.value)]
            yield read
class CufflinksXS(object):
    '''
    Adds the XS:A strand tag ('+' or '-') required by Cufflinks.

    Unmapped reads have no strand, so they are passed through untagged.
    '''
    def __init__(self, parent):
        self.parent = parent

    def filter(self, bam):
        for read in self.parent.filter(bam):
            if not read.is_unmapped:
                if read.is_reverse:
                    read.tags = read.tags + [('XS', '-')]
                else:
                    read.tags = read.tags + [('XS', '+')]
            # Bug fix: yield unconditionally. Previously the yield sat inside
            # the is_unmapped check, so unmapped reads were silently dropped
            # from the output BAM instead of just being left untagged.
            yield read
def usage():
    # Print the module docstring plus CLI help, then exit non-zero.
    # NOTE: Python 2 print statements; the exact help text below is runtime
    # output and must not be reworded casually.
    print __doc__
    print """\
Usage: bamutils tag {opts} in.bamfile out.bamfile
Arguments:
  in.bamfile    The input BAM file
  out.bamfile   The name of the new output BAM file
Options:
  -suffix suff     A suffix to add to each read name
  -xs              Add the XS:A tag for +/- strandedness (req'd by Cufflinks)
  -tag tag         Add an arbitrary tag (ex: -tag XX:Z:test)
  -junction tag    Predicts junction spans from CIGAR alignment
  -orig-ref tag    Add a new tag with the original reference name (For
                   example, in a region-based BAM will be converted to
                   standard coordinates)
  -orig-pos tag    Add a new tag with the original reference pos
  -orig-cigar tag  Add a new tag with the original CIGAR alignment
  -f               Force overwriting the output BAM file if it exists
"""
    sys.exit(1)
if __name__ == "__main__":
    infname = None
    outfname = None
    force = False
    # Two-state argv parser: 'last' remembers an option that is still waiting
    # for its value on the next iteration.
    last = None
    args = []
    for arg in sys.argv[1:]:
        if arg == '-f':
            force = True
        elif last == '-suffix':
            args.append([Suffix, arg])
            last = None
        elif last == '-tag':
            args.append([Tag, arg])
            last = None
        elif last == '-orig-ref':
            args.append([OrigRef, arg])
            last = None
        elif last == '-orig-pos':
            args.append([OrigPos, arg])
            last = None
        elif last == '-orig-cigar':
            args.append([OrigCIGAR, arg])
            last = None
        elif last == '-junction':
            args.append([PredictJunction, arg])
            last = None
        elif arg in ['-suffix', '-tag', '-orig-ref', '-orig-pos', '-orig-cigar', '-junction']:
            last = arg
        elif arg == '-xs':
            # -xs takes no value; CufflinksXS is constructed with no extra arg
            args.append([CufflinksXS, ])
        elif not infname and os.path.exists(arg):
            # NOTE(review): an input path that does not exist falls through to
            # the outfname slot below -- confirm this ordering is intended.
            infname = arg
        elif not outfname:
            outfname = arg
    if not infname or not outfname or not args:
        usage()
    if not force and os.path.exists(outfname):
        sys.stderr.write('ERROR: %s already exists! Not overwriting without force (-f)\n\n' % outfname)
        sys.exit(1)
    # Build the filter pipeline inside-out: reader first, then each tagging
    # filter wraps the previous one in command-line order.
    writer = BamWriter(outfname, infname)
    chain = BamReader()
    for arg in args:
        chain = arg[0](chain, *arg[1:])
    writer.run_chain(chain)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._open_id_connect_provider_operations import build_create_or_update_request, build_delete_request, build_get_entity_tag_request, build_get_request, build_list_by_service_request, build_list_secrets_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OpenIdConnectProviderOperations:
    """OpenIdConnectProviderOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~api_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code -- hand edits will be lost when the
    # client is regenerated (see the file header).
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # All collaborators are injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_service(
        self,
        resource_group_name: str,
        service_name: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.OpenIdConnectProviderCollection"]:
        """Lists of all the OpenId Connect Providers.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param filter: |   Field     |     Usage     |     Supported operators     |     Supported
         functions     |</br>|-------------|-------------|-------------|-------------|</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
         displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
         |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OpenIdConnectProviderCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~api_management_client.models.OpenIdConnectProviderCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenIdConnectProviderCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the request for the first page (templated service URL) or for
        # a continuation page (opaque next_link returned by the service).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_service_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    top=top,
                    skip=skip,
                    template_url=self.list_by_service.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_service_request(
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    top=top,
                    skip=skip,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation pages are always fetched with GET.
                request.method = "GET"
            return request
        # Deserialize one page into (continuation-token, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OpenIdConnectProviderCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page and map HTTP errors to typed exceptions.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders'}  # type: ignore
    @distributed_trace_async
    async def get_entity_tag(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        **kwargs: Any
    ) -> bool:
        """Gets the entity state (Etag) version of the openIdConnectProvider specified by its identifier.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: bool, or the result of cls(response)
        :rtype: bool
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_entity_tag_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            template_url=self.get_entity_tag.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        if cls:
            return cls(pipeline_response, None, response_headers)
        # Any 2xx counts as success; the ETag itself is surfaced via cls/headers.
        return 200 <= response.status_code <= 299
    get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        **kwargs: Any
    ) -> "_models.OpenidConnectProviderContract":
        """Gets specific OpenID Connect Provider without secrets.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenidConnectProviderContract, or the result of cls(response)
        :rtype: ~api_management_client.models.OpenidConnectProviderContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenidConnectProviderContract"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'}  # type: ignore
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        parameters: "_models.OpenidConnectProviderContract",
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.OpenidConnectProviderContract":
        """Creates or updates the OpenID Connect Provider.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :param parameters: Create parameters.
        :type parameters: ~api_management_client.models.OpenidConnectProviderContract
        :param if_match: ETag of the Entity. Not required when creating an entity, but required when
         updating an entity.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenidConnectProviderContract, or the result of cls(response)
        :rtype: ~api_management_client.models.OpenidConnectProviderContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenidConnectProviderContract"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'OpenidConnectProviderContract')
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            if_match=if_match,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        # 200 = existing entity updated, 201 = new entity created; both carry
        # the contract body plus an ETag header.
        if response.status_code == 200:
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
        if response.status_code == 201:
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'}  # type: ignore
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        if_match: str,
        parameters: "_models.OpenidConnectProviderUpdateContract",
        **kwargs: Any
    ) -> "_models.OpenidConnectProviderContract":
        """Updates the specific OpenID Connect Provider.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :param if_match: ETag of the Entity. ETag should match the current entity state from the header
         response of the GET request or it should be * for unconditional update.
        :type if_match: str
        :param parameters: Update parameters.
        :type parameters: ~api_management_client.models.OpenidConnectProviderUpdateContract
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenidConnectProviderContract, or the result of cls(response)
        :rtype: ~api_management_client.models.OpenidConnectProviderContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenidConnectProviderContract"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'OpenidConnectProviderUpdateContract')
        request = build_update_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            if_match=if_match,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('OpenidConnectProviderContract', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'}  # type: ignore
    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        if_match: str,
        **kwargs: Any
    ) -> None:
        """Deletes specific OpenID Connect Provider of the API Management service instance.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :param if_match: ETag of the Entity. ETag should match the current entity state from the header
         response of the GET request or it should be * for unconditional update.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            if_match=if_match,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 is returned when the entity was already gone.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}'}  # type: ignore
    @distributed_trace_async
    async def list_secrets(
        self,
        resource_group_name: str,
        service_name: str,
        opid: str,
        **kwargs: Any
    ) -> "_models.ClientSecretContract":
        """Gets the client secret details of the OpenID Connect Provider.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param opid: Identifier of the OpenID Connect Provider.
        :type opid: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ClientSecretContract, or the result of cls(response)
        :rtype: ~api_management_client.models.ClientSecretContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClientSecretContract"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_list_secrets_request(
            resource_group_name=resource_group_name,
            service_name=service_name,
            opid=opid,
            subscription_id=self._config.subscription_id,
            template_url=self.list_secrets.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ClientSecretContract', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    list_secrets.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/openidConnectProviders/{opid}/listSecrets'}  # type: ignore
| |
#SPDX-License-Identifier: MIT
"""
Metrics that provide data about pull requests & their associated activity
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric()
def pull_requests_merge_contributor_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """
    Returns a timeseries of the count of persons contributing with an accepted commit for the first time.

    :param repo_id: The repository's id
    :param repo_group_id: The repository's group id
    :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:01'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of persons/period
    """
    begin_date = begin_date or '1970-1-1 00:00:01'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Bind parameters shared by both query variants.
    params = {'period': period, 'begin_date': begin_date, 'end_date': end_date}

    if repo_id:
        params['repo_id'] = repo_id
        query = s.sql.text("""
            SELECT date_trunc(:period, new_date::DATE) as commit_date,
            COUNT(cmt_author_email), repo_name
            FROM ( SELECT repo_name, cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD')) AS new_date
            FROM commits JOIN repo ON commits.repo_id = repo.repo_id
            WHERE commits.repo_id = :repo_id
            AND TO_TIMESTAMP(cmt_author_date,'YYYY-MM-DD') BETWEEN :begin_date AND :end_date AND cmt_author_email IS NOT NULL
            GROUP BY cmt_author_email, repo_name
            ) as abc GROUP BY commit_date, repo_name
        """)
    else:
        params['repo_group_id'] = repo_group_id
        query = s.sql.text("""
            SELECT abc.repo_id, repo_name ,date_trunc(:period, new_date::DATE) as commit_date,
                COUNT(cmt_author_email)
            FROM (SELECT cmt_author_email, MIN(TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD')) AS new_date, repo_id
                FROM commits
                WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD') BETWEEN :begin_date AND :end_date
                AND cmt_author_email IS NOT NULL
                GROUP BY cmt_author_email, repo_id
                ) as abc, repo
            WHERE abc.repo_id = repo.repo_id
            GROUP BY abc.repo_id, repo_name, commit_date
        """)

    return pd.read_sql(query, self.database, params=params)
@register_metric()
def pull_requests_closed_no_merge(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """
    Returns a timeseries of counts of pull requests which were closed but not merged.

    :param repo_group_id: The repository's group id
    :param repo_id: The repository's id, defaults to None (aggregate over the whole group)
    :param period: Sets the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:01'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of closed-but-unmerged pull request counts per period
    """
    if not begin_date:
        begin_date = '1970-1-1 00:00:01'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Fixes vs. the previous version of both queries:
    #  * :begin_date/:end_date were passed to read_sql but never referenced
    #    in the SQL, so the requested date window was silently ignored.
    #  * GROUP BY included pull_request_id, which made every pr_count equal 1
    #    instead of producing the per-period total the docstring promises.
    if repo_id:
        closedNoMerge = s.sql.text("""
            SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date,
                COUNT(pull_request_id) AS pr_count
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id = :repo_id
            AND pull_requests.pr_closed_at IS NOT NULL
            AND pull_requests.pr_merged_at IS NULL
            AND pull_requests.pr_closed_at BETWEEN :begin_date AND :end_date
            GROUP BY closed_date
            ORDER BY closed_date
        """)
        results = pd.read_sql(closedNoMerge, self.database, params={'repo_id': repo_id, 'period': period,
                                                                    'begin_date': begin_date,
                                                                    'end_date': end_date})
    else:
        closedNoMerge = s.sql.text("""
            SELECT DATE_TRUNC(:period, pull_requests.pr_closed_at) AS closed_date,
                COUNT(pull_request_id) AS pr_count
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id IN (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
            AND pull_requests.pr_closed_at IS NOT NULL
            AND pull_requests.pr_merged_at IS NULL
            AND pull_requests.pr_closed_at BETWEEN :begin_date AND :end_date
            GROUP BY closed_date
            ORDER BY closed_date
        """)
        results = pd.read_sql(closedNoMerge, self.database,
                              params={'repo_group_id': repo_group_id, 'period': period,
                                      'begin_date': begin_date, 'end_date': end_date})
    return results
@register_metric()
def reviews(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """Timeseries of new reviews (pull requests) opened.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param period: Periodicity of the series: 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of new reviews per period
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    if repo_id:
        query = s.sql.text("""
            SELECT
                repo_name,
                DATE_TRUNC(:period, pull_requests.pr_created_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id = :repo_id
            AND pull_requests.pr_created_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD HH24:MI:SS')
                AND to_timestamp(:end_date, 'YYYY-MM-DD HH24:MI:SS')
            GROUP BY date, repo_name
            ORDER BY date
        """)
        params = {'period': period, 'repo_id': repo_id,
                  'begin_date': begin_date, 'end_date': end_date}
    else:
        query = s.sql.text("""
            SELECT
                pull_requests.repo_id,
                repo_name,
                DATE_TRUNC(:period, pull_requests.pr_created_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id IN
                (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
            AND pull_requests.pr_created_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            GROUP BY pull_requests.repo_id, repo_name, date
            ORDER BY pull_requests.repo_id, date
        """)
        params = {'period': period, 'repo_group_id': repo_group_id,
                  'begin_date': begin_date, 'end_date': end_date}

    return pd.read_sql(query, self.database, params=params)
@register_metric()
def reviews_accepted(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """Timeseries of the number of reviews (pull requests) accepted, i.e. merged.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param period: Periodicity of the series: 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of accepted reviews per period
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    if repo_id:
        query = s.sql.text("""
            SELECT
                repo.repo_name,
                DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id = :repo_id
            AND pr_merged_at IS NOT NULL
            AND pr_merged_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            GROUP BY date, repo.repo_name
            ORDER BY date
        """)
        params = {'period': period, 'repo_id': repo_id,
                  'begin_date': begin_date, 'end_date': end_date}
    else:
        query = s.sql.text("""
            SELECT
                pull_requests.repo_id,
                repo.repo_name,
                DATE_TRUNC(:period, pull_requests.pr_merged_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id IN
                (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
            AND pr_merged_at IS NOT NULL
            AND pr_merged_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            GROUP BY pull_requests.repo_id, repo_name, date
            ORDER BY pull_requests.repo_id, date
        """)
        params = {'period': period, 'repo_group_id': repo_group_id,
                  'begin_date': begin_date, 'end_date': end_date}

    return pd.read_sql(query, self.database, params=params)
@register_metric()
def reviews_declined(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """Timeseries of the number of reviews (pull requests) declined — closed without a merge.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param period: Periodicity of the series: 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of declined reviews per period
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    if repo_id:
        query = s.sql.text("""
            SELECT
                repo.repo_name,
                DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id = :repo_id
            AND pr_src_state = 'closed' AND pr_merged_at IS NULL
            AND pr_closed_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            GROUP BY date, repo.repo_name
            ORDER BY date
        """)
        params = {'period': period, 'repo_id': repo_id,
                  'begin_date': begin_date, 'end_date': end_date}
    else:
        query = s.sql.text("""
            SELECT
                pull_requests.repo_id,
                repo.repo_name,
                DATE_TRUNC(:period, pull_requests.pr_closed_at) AS date,
                COUNT(pr_src_id) AS pull_requests
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id IN
                (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
            AND pr_src_state = 'closed' AND pr_merged_at IS NULL
            AND pr_closed_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            GROUP BY pull_requests.repo_id, repo_name, date
            ORDER BY pull_requests.repo_id, date
        """)
        params = {'period': period, 'repo_group_id': repo_group_id,
                  'begin_date': begin_date, 'end_date': end_date}

    return pd.read_sql(query, self.database, params=params)
@register_metric()
def review_duration(self, repo_group_id, repo_id=None, begin_date=None, end_date=None):
    """Duration of each accepted (merged) review.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of pull request ids with the corresponding duration
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    if repo_id:
        query = s.sql.text("""
            SELECT
                repo_name,
                pull_request_id,
                pr_created_at AS created_at,
                pr_merged_at AS merged_at,
                (pr_merged_at - pr_created_at) AS duration
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id = :repo_id
            AND pr_merged_at IS NOT NULL
            AND pr_created_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            ORDER BY pull_requests.repo_id, pull_request_id
        """)
        params = {'repo_id': repo_id,
                  'begin_date': begin_date,
                  'end_date': end_date}
    else:
        query = s.sql.text("""
            SELECT
                pull_requests.repo_id,
                repo.repo_name,
                pull_requests.pull_request_id,
                pull_requests.pr_created_at AS created_at,
                pull_requests.pr_merged_at AS merged_at,
                (pr_merged_at - pr_created_at) AS duration
            FROM pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
            WHERE pull_requests.repo_id IN
                (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
            AND pr_merged_at IS NOT NULL
            AND pr_created_at
                BETWEEN to_timestamp(:begin_date, 'YYYY-MM-DD')
                AND to_timestamp(:end_date, 'YYYY-MM-DD')
            ORDER BY pull_requests.repo_id, pull_requests.pull_request_id
        """)
        params = {'repo_group_id': repo_group_id,
                  'begin_date': begin_date,
                  'end_date': end_date}

    results = pd.read_sql(query, self.database, params=params)
    # Intervals are not JSON-serializable; both branches stringify the duration.
    results['duration'] = results['duration'].astype(str)
    return results
@register_metric()
def pull_request_acceptance_rate(self, repo_group_id, repo_id=None, begin_date=None, end_date=None, group_by='week'):
    """Timeseries of pull request acceptance rate.

    The rate is the ratio of pull requests merged in a period to the number of
    pull requests marked ready for review in the same period (both derived
    from issue_events rows where issues.pull_request is set).

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param begin_date: Start of the window, defaults to '1970-1-1 00:00:01'
    :param end_date: End of the window, defaults to now
    :param group_by: Period used to bucket events, defaults to 'week'
    :return: DataFrame with a rate per period
    """
    begin_date = begin_date or '1970-1-1 00:00:01'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    if repo_id:
        query = s.sql.text("""
            SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"
            FROM
            (
                SELECT count(issue_events.issue_id) AS num_approved,
                    date_trunc(:group_by,issue_events.created_at) AS accepted_on
                FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id
                WHERE action = 'merged'
                AND issues.pull_request IS NOT NULL
                AND repo_id = :repo_id
                AND issue_events.created_at BETWEEN :begin_date AND :end_date
                GROUP BY accepted_on
                ORDER BY accepted_on
            ) accepted
            JOIN
            (
                SELECT count(issue_events.issue_id) AS num_open,
                    date_trunc(:group_by,issue_events.created_at) AS date_created
                FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id
                WHERE action = 'ready_for_review'
                AND issues.pull_request IS NOT NULL
                AND repo_id = :repo_id
                AND issue_events.created_at BETWEEN :begin_date AND :end_date
                GROUP BY date_created
                ORDER BY date_created
            ) opened
            ON opened.date_created = accepted.accepted_on
        """)
        params = {'repo_id': repo_id, 'group_by': group_by,
                  'begin_date': begin_date, 'end_date': end_date}
    else:
        query = s.sql.text("""
            SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"
            FROM
            (
                SELECT count(issue_events.issue_id) AS num_approved,
                    date_trunc(:group_by,issue_events.created_at) AS accepted_on
                FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id
                JOIN repo ON issues.repo_id = repo.repo_id
                WHERE action = 'merged'
                AND issues.pull_request IS NOT NULL
                AND repo_group_id = :repo_group_id
                AND issue_events.created_at BETWEEN :begin_date AND :end_date
                GROUP BY accepted_on
                ORDER BY accepted_on
            ) accepted
            JOIN
            (
                SELECT count(issue_events.issue_id) AS num_open,
                    date_trunc(:group_by,issue_events.created_at) AS date_created
                FROM issue_events JOIN issues ON issues.issue_id = issue_events.issue_id
                JOIN repo ON issues.repo_id = repo.repo_id
                WHERE action = 'ready_for_review'
                AND issues.pull_request IS NOT NULL
                AND repo_group_id = :repo_group_id
                AND issue_events.created_at BETWEEN :begin_date AND :end_date
                GROUP BY date_created
                ORDER BY date_created
            ) opened
            ON opened.date_created = accepted.accepted_on
        """)
        params = {'repo_group_id': repo_group_id, 'group_by': group_by,
                  'begin_date': begin_date, 'end_date': end_date}

    return pd.read_sql(query, self.database, params=params)
@register_metric()
def pull_request_average_time_to_close(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None):
    """Average time to close pull requests, split by merged status and time frame.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: Time frame the data is grouped by: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param time_unit: Unit of time for the result: 'hours' or 'days', defaults to 'hours'
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of average time to close pull requests
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    # Group on every 'closed_<unit>' column from the coarsest unit ('year')
    # down to and including the requested group_by; empty when group_by is
    # not a recognized unit.
    unit_options = ['year', 'month', 'week', 'day']
    if group_by in unit_options:
        time_group_bys = ['closed_{}'.format(unit)
                          for unit in unit_options[:unit_options.index(group_by) + 1]]
    else:
        time_group_bys = []

    if repo_id:
        pr_all_SQL = s.sql.text("""
            SELECT
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part('week', pr_closed_at :: DATE) AS closed_week,
                date_part('day', pr_closed_at :: DATE) AS closed_day,
                EXTRACT (epoch FROM time_to_close)/ 86400 AS average_days_to_close,
                EXTRACT (epoch FROM time_to_close)/ 3600 AS average_hours_to_close,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT pull_requests.pull_request_id,
                    pr_closed_at,
                    pr_created_at,
                    pr_closed_at - pr_created_at AS time_to_close,
                    pr_merged_at
                FROM pull_requests, repo, pull_request_message_ref, message
                WHERE repo.repo_id = :repo_id
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id
            ) time_between_responses
            GROUP BY merged_status, time_between_responses.pr_closed_at, time_between_responses.time_to_close
            ORDER BY merged_status
        """)
    else:
        pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                rg_name AS repo_group_name,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part('week', pr_closed_at :: DATE) AS closed_week,
                date_part('day', pr_closed_at :: DATE) AS closed_day,
                EXTRACT (epoch FROM time_to_close)/ 86400 AS average_days_to_close,
                EXTRACT (epoch FROM time_to_close)/ 3600 AS average_hours_to_close,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT
                    pull_requests.pull_request_id,
                    pull_requests.repo_id,
                    repo_name,
                    repo.repo_group_id,
                    rg_name,
                    pr_closed_at,
                    pr_created_at,
                    pr_closed_at - pr_created_at AS time_to_close,
                    pr_merged_at
                FROM pull_request_message_ref, message, repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND repo.repo_group_id = repo_groups.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, repo.repo_name, repo.repo_group_id, repo_groups.rg_name
            ) time_between_responses
            GROUP BY merged_status, time_between_responses.pr_closed_at, time_between_responses.time_to_close, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name
            ORDER BY merged_status
        """)

    pr_all = pd.read_sql(pr_all_SQL, self.database,
                         params={'repo_id': repo_id, 'repo_group_id': repo_group_id,
                                 'begin_date': begin_date, 'end_date': end_date})

    if repo_id:
        group_cols = ['merged_status'] + time_group_bys
        keep_cols = time_group_bys + ['merged_status', 'average_{}_to_close'.format(time_unit)]
    else:
        group_cols = ['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys
        keep_cols = group_cols + ['average_{}_to_close'.format(time_unit)]
    return pr_all.groupby(group_cols).mean().reset_index()[keep_cols]
@register_metric()
def pull_request_average_time_between_responses(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None):
    """ Average time between responses with merged_status and the time frame

    NOTE(review): the previous docstring contained unresolved git-stash
    conflict markers ('=======' / '>>>>>>> Stashed changes', plus a stray
    def line for pull_request_merged_status_counts); that residue has been
    removed here.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param time_unit: Unit of time for data, options are: 'minutes', or 'hours', defaults to 'hours'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of average time between responses
    """
    if not begin_date:
        begin_date = '1970-1-1'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d')

    # Build the list of 'closed_<unit>' columns to group on, from the
    # coarsest unit ('year') down to the requested group_by; the list is
    # empty when group_by is not a recognized unit.
    unit_options = ['year', 'month', 'week', 'day']
    if group_by in unit_options:
        time_group_bys = ['closed_{}'.format(unit)
                          for unit in unit_options[:unit_options.index(group_by) + 1]]
    else:
        time_group_bys = []

    if not repo_id:
        pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                rg_name AS repo_group_name,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
                (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT
                    repo_name,
                    repo_groups.repo_group_id,
                    rg_name,
                    pull_requests.repo_id,
                    pull_requests.pull_request_id,
                    pr_closed_at,
                    pr_created_at,
                    pr_merged_at,
                    (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
                FROM pull_request_message_ref, message, repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, repo.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
            ) time_between_responses
            GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name
        """)
    else:
        pr_all_SQL = s.sql.text("""
            SELECT
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
                (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                count(*) AS num_pull_requests
            FROM (
                SELECT pull_requests.pull_request_id,
                    pr_closed_at,
                    pr_created_at,
                    pr_merged_at,
                    (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
                FROM pull_requests, repo, pull_request_message_ref, message
                WHERE repo.repo_id = :repo_id
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id
            ) time_between_responses
            GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses
        """)

    pr_all = pd.read_sql(pr_all_SQL, self.database,
                         params={'repo_id': repo_id, 'repo_group_id': repo_group_id,
                                 'begin_date': begin_date, 'end_date': end_date})

    # Average the per-PR response gaps within each merged_status/time bucket.
    if not repo_id:
        pr_avg_time_between_responses = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_between_responses'.format(time_unit)]]
    else:
        pr_avg_time_between_responses = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_between_responses'.format(time_unit)]]
    return pr_avg_time_between_responses
@register_metric()
def pull_request_average_commit_counts(self, repo_group_id, repo_id=None, group_by='month', begin_date=None, end_date=None):
    """Average commits per pull request, with merged status and time frame.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: Time frame the data is grouped by: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param begin_date: Start of the window, defaults to '1970-1-1'
    :param end_date: End of the window, defaults to today
    :return: DataFrame of average commits per pull request
    """
    begin_date = begin_date or '1970-1-1'
    end_date = end_date or datetime.datetime.now().strftime('%Y-%m-%d')

    # Group on every 'closed_<unit>' column from 'year' down to the
    # requested group_by; empty when group_by is not a recognized unit.
    unit_options = ['year', 'month', 'week', 'day']
    if group_by in unit_options:
        time_group_bys = ['closed_{}'.format(unit)
                          for unit in unit_options[:unit_options.index(group_by) + 1]]
    else:
        time_group_bys = []

    if repo_id:
        pr_all_SQL = s.sql.text("""
            SELECT
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                commit_count AS average_commits_per_pull_request,
                count(*) AS pr_count
            FROM (
                SELECT
                    pull_request_commits.pull_request_id,
                    count(DISTINCT pr_cmt_sha) AS commit_count,
                    pr_merged_at,
                    pr_closed_at,
                    pr_created_at
                FROM augur_data.pull_request_commits, augur_data.pull_requests, augur_data.pull_request_meta
                WHERE pull_requests.pull_request_id = pull_request_commits.pull_request_id
                AND pull_requests.pull_request_id = pull_request_meta.pull_request_id
                AND pull_requests.repo_id = :repo_id
                AND pr_cmt_sha <> pull_requests.pr_merge_commit_sha
                AND pr_cmt_sha <> pull_request_meta.pr_sha
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_request_commits.pull_request_id, pr_merged_at, pr_closed_at, pr_created_at
                ORDER BY pr_created_at
            ) data
            GROUP BY closed_year, merged_status, data.pr_closed_at, data.commit_count
        """)
    else:
        pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                repo_group_name,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                commit_count AS average_commits_per_pull_request,
                count(*) AS pr_count
            FROM (
                SELECT
                    pull_requests.repo_id,
                    repo.repo_name,
                    repo_groups.repo_group_id,
                    rg_name AS repo_group_name,
                    pull_request_commits.pull_request_id,
                    count(DISTINCT pr_cmt_sha) AS commit_count,
                    pr_merged_at,
                    pr_closed_at,
                    pr_created_at
                FROM augur_data.pull_request_commits, augur_data.pull_request_meta,augur_data.repo_groups,
                    augur_data.pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND pull_requests.pull_request_id = pull_request_commits.pull_request_id
                AND pull_requests.pull_request_id = pull_request_meta.pull_request_id
                AND pr_cmt_sha <> pull_requests.pr_merge_commit_sha
                AND pr_cmt_sha <> pull_request_meta.pr_sha
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_request_commits.pull_request_id, pr_merged_at, pr_closed_at, pr_created_at, repo.repo_name, pull_requests.repo_id, repo_groups.rg_name, repo_groups.repo_group_id
                ORDER BY pr_created_at
            ) data
            GROUP BY closed_year, merged_status, data.pr_closed_at, data.commit_count, data.repo_id, data.repo_name, data.repo_group_id, data.repo_group_name
        """)

    pr_all = pd.read_sql(pr_all_SQL, self.database,
                         params={'repo_id': repo_id, 'repo_group_id': repo_group_id,
                                 'begin_date': begin_date, 'end_date': end_date})

    if repo_id:
        group_cols = ['merged_status'] + time_group_bys
        keep_cols = time_group_bys + ['merged_status', 'average_commits_per_pull_request']
    else:
        group_cols = ['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys
        keep_cols = group_cols + ['average_commits_per_pull_request']
    return pr_all.groupby(group_cols).mean().reset_index()[keep_cols]
@register_metric()
def pull_request_average_event_counts(self, repo_group_id, repo_id=None, group_by='month', begin_date=None, end_date=None):
    """ Average of event counts with merged status and time frame

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of event counts averages
    """
    if not begin_date:
        begin_date = '1970-1-1'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d')

    # Build ['closed_year', ..., 'closed_<group_by>']: once group_by has been
    # deleted from unit_options, the membership test fails and the remaining
    # (finer) units are skipped. An unrecognized group_by yields an empty list.
    unit_options = ['year', 'month', 'week', 'day']
    time_group_bys = []
    for unit in unit_options.copy():
        if group_by not in unit_options:
            continue
        time_group_bys.append('closed_{}'.format(unit))
        del unit_options[0]

    if not repo_id:
        # Group-wide query. Inner SELECT: one row per pull request with one
        # count per event type (FILTER clauses over pull_request_events.action)
        # plus a comment count from distinct message timestamps.
        # NOTE(review): the comma joins against pull_request_message_ref and
        # message are inner joins, so PRs with no comments are presumably
        # excluded from the averages — confirm against intended semantics.
        pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                repo_group_name,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                assigned_count AS average_assigned_count,
                review_requested_count AS average_review_requested_count,
                labeled_count AS average_labeled_count,
                unlabeled_count AS average_unlabeled_count,
                subscribed_count AS average_subscribed_count,
                mentioned_count AS average_mentioned_count,
                referenced_count AS average_referenced_count,
                closed_count AS average_closed_count,
                head_ref_force_pushed_count AS average_head_ref_force_pushed_count,
                head_ref_deleted_count AS average_head_ref_deleted_count,
                milestoned_count AS average_milestoned_count,
                merged_count AS average_merged_count,
                comment_count AS average_comment_count,
                count(*) AS num_pull_requests
            FROM (
                SELECT
                    pull_requests.repo_id,
                    repo_name,
                    repo_groups.repo_group_id,
                    rg_name AS repo_group_name,
                    pull_requests.pull_request_id,
                    pr_merged_at,
                    pr_created_at,
                    pr_closed_at,
                    count(*) FILTER (WHERE action = 'assigned') AS assigned_count,
                    count(*) FILTER (WHERE action = 'review_requested') AS review_requested_count,
                    count(*) FILTER (WHERE action = 'labeled') AS labeled_count,
                    count(*) FILTER (WHERE action = 'unlabeled') AS unlabeled_count,
                    count(*) FILTER (WHERE action = 'subscribed') AS subscribed_count,
                    count(*) FILTER (WHERE action = 'mentioned') AS mentioned_count,
                    count(*) FILTER (WHERE action = 'referenced') AS referenced_count,
                    count(*) FILTER (WHERE action = 'closed') AS closed_count,
                    count(*) FILTER (WHERE action = 'head_ref_force_pushed') AS head_ref_force_pushed_count,
                    count(*) FILTER (WHERE action = 'head_ref_deleted') AS head_ref_deleted_count,
                    count(*) FILTER (WHERE action = 'milestoned') AS milestoned_count,
                    count(*) FILTER (WHERE action = 'merged') AS merged_count,
                    COUNT(DISTINCT message.msg_timestamp) AS comment_count
                FROM pull_request_events, pull_request_message_ref, message, repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo.repo_id = pull_requests.repo_id
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pull_requests.pull_request_id = pull_request_events.pull_request_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
            ) data
            GROUP BY closed_year, closed_month, closed_week, closed_day, merged_status, data.assigned_count, data.review_requested_count, data.labeled_count, data.unlabeled_count, data.subscribed_count, data.mentioned_count, data.referenced_count, data.closed_count,
                data.head_ref_force_pushed_count, data.head_ref_deleted_count, data.milestoned_count, data.merged_count, data.comment_count, data.repo_id, data.repo_name, data.repo_group_id, data.repo_group_name
            ORDER BY merged_status, closed_year, closed_week, closed_day
        """)
    else:
        # Single-repo variant of the same query, without the repo/repo_groups
        # identification columns.
        pr_all_SQL = s.sql.text("""
            SELECT
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                assigned_count AS average_assigned_count,
                review_requested_count AS average_review_requested_count,
                labeled_count AS average_labeled_count,
                unlabeled_count AS average_unlabeled_count,
                subscribed_count AS average_subscribed_count,
                mentioned_count AS average_mentioned_count,
                referenced_count AS average_referenced_count,
                closed_count AS average_closed_count,
                head_ref_force_pushed_count AS average_head_ref_force_pushed_count,
                head_ref_deleted_count AS average_head_ref_deleted_count,
                milestoned_count AS average_milestoned_count,
                merged_count AS average_merged_count,
                comment_count AS average_comment_count,
                count(*) AS num_pull_requests
            FROM (
                SELECT pull_requests.pull_request_id,
                    pr_merged_at,
                    pr_created_at,
                    pr_closed_at,
                    count(*) FILTER (WHERE action = 'assigned') AS assigned_count,
                    count(*) FILTER (WHERE action = 'review_requested') AS review_requested_count,
                    count(*) FILTER (WHERE action = 'labeled') AS labeled_count,
                    count(*) FILTER (WHERE action = 'unlabeled') AS unlabeled_count,
                    count(*) FILTER (WHERE action = 'subscribed') AS subscribed_count,
                    count(*) FILTER (WHERE action = 'mentioned') AS mentioned_count,
                    count(*) FILTER (WHERE action = 'referenced') AS referenced_count,
                    count(*) FILTER (WHERE action = 'closed') AS closed_count,
                    count(*) FILTER (WHERE action = 'head_ref_force_pushed') AS head_ref_force_pushed_count,
                    count(*) FILTER (WHERE action = 'head_ref_deleted') AS head_ref_deleted_count,
                    count(*) FILTER (WHERE action = 'milestoned') AS milestoned_count,
                    count(*) FILTER (WHERE action = 'merged') AS merged_count,
                    COUNT(DISTINCT message.msg_timestamp) AS comment_count
                FROM pull_request_events, pull_requests, repo, pull_request_message_ref, message
                WHERE repo.repo_id = :repo_id
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_events.pull_request_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id
            ) data
            GROUP BY closed_year, closed_month, closed_week, closed_day, merged_status, data.assigned_count, data.review_requested_count, data.labeled_count, data.unlabeled_count, data.subscribed_count, data.mentioned_count, data.referenced_count, data.closed_count,
                data.head_ref_force_pushed_count, data.head_ref_deleted_count, data.milestoned_count, data.merged_count, data.comment_count
            ORDER BY merged_status, closed_year, closed_week, closed_day
        """)

    pr_all = pd.read_sql(pr_all_SQL, self.database,
                         params={'repo_id': repo_id, 'repo_group_id': repo_group_id,
                                 'begin_date': begin_date, 'end_date': end_date})

    # Output columns are the 'average_<event>_count' aliases produced above.
    count_names = ['assigned_count', 'review_requested_count', 'labeled_count', 'unlabeled_count', 'subscribed_count', 'mentioned_count', 'referenced_count', 'closed_count', 'head_ref_force_pushed_count', 'head_ref_deleted_count', 'milestoned_count', 'merged_count', 'comment_count']
    average_count_names = []
    for name in count_names.copy():
        average_count_names.append('average_' + name)

    # Average per-PR event counts within each merged_status/time bucket.
    if not repo_id:
        pr_avg_event_counts = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + average_count_names]
    else:
        pr_avg_event_counts = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[['merged_status'] + time_group_bys + average_count_names]
    return pr_avg_event_counts
@register_metric()
def pull_request_average_time_to_responses_and_close(self, repo_group_id, repo_id=None, group_by='month', time_unit ='days', begin_date=None, end_date=None):
    """ Average of time to first response, last response, and time to close with merged status and time frame

    The per-pull-request intervals are computed in SQL; the averaging itself
    happens afterwards in pandas via ``groupby(...).mean()``.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param time_unit: Unit of time of data is in, options are: 'hours', or 'days', defaults to 'days'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of averages of time to first response, last response, and close
    """
    # Default to the repository's full history when no window is given.
    if not begin_date:
        begin_date = '1970-1-1'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d')

    # Build the list of 'closed_<unit>' grouping columns from the coarsest
    # unit ('year') down to the requested group_by granularity: each pass
    # appends only while group_by is still present in the shrinking
    # unit_options list, then discards the coarsest remaining unit.
    # E.g. group_by='month' yields ['closed_year', 'closed_month'].
    unit_options = ['year', 'month', 'week', 'day']
    time_group_bys = []
    for unit in unit_options.copy():
        if group_by not in unit_options:
            continue
        time_group_bys.append('closed_{}'.format(unit))
        del unit_options[0]

    if not repo_id:
        # Repo-group variant: one row per PR with repo/repo-group identity
        # columns so the pandas groupby can keep results per repository.
        pr_all_SQL = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                repo_group_name,
                EXTRACT(epoch FROM(first_response_time - pr_created_at)/86400) AS average_days_to_first_response,
                EXTRACT(epoch FROM(first_response_time - pr_created_at)/3600) AS average_hours_to_first_response,
                EXTRACT(epoch FROM(last_response_time - pr_created_at)/86400) AS average_days_to_last_response,
                EXTRACT(epoch FROM(last_response_time - pr_created_at)/3600) AS average_hours_to_last_response,
                EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/86400) AS average_days_to_close,
                EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/3600) AS average_hours_to_close,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                count(*) AS num_pull_requests
            FROM (
                SELECT
                    pull_requests.repo_id,
                    repo.repo_name,
                    repo_groups.repo_group_id,
                    rg_name AS repo_group_name,
                    pull_requests.pull_request_id,
                    MIN(message.msg_timestamp) AS first_response_time,
                    MAX(message.msg_timestamp) AS last_response_time,
                    pull_requests.pr_closed_at,
                    pr_created_at,
                    pull_requests.pr_merged_at
                FROM pull_request_message_ref, message, repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
            ) response_times
            GROUP BY closed_year, merged_status, response_times.first_response_time, response_times.last_response_time, response_times.pr_created_at, response_times.pr_closed_at, response_times.repo_id, response_times.repo_name, response_times.repo_group_id, response_times.repo_group_name
        """)
    else:
        # Single-repo variant: same shape without the repo identity columns.
        pr_all_SQL = s.sql.text("""
            SELECT
                EXTRACT(epoch FROM(first_response_time - pr_created_at)/86400) AS average_days_to_first_response,
                EXTRACT(epoch FROM(first_response_time - pr_created_at)/3600) AS average_hours_to_first_response,
                EXTRACT(epoch FROM(last_response_time - pr_created_at)/86400) AS average_days_to_last_response,
                EXTRACT(epoch FROM(last_response_time - pr_created_at)/3600) AS average_hours_to_last_response,
                EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/86400) AS average_days_to_close,
                EXTRACT(epoch FROM(pr_closed_at - pr_created_at)/3600) AS average_hours_to_close,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
                count(*) AS num_pull_requests
            FROM (
                SELECT pull_requests.pull_request_id,
                    MIN(message.msg_timestamp) AS first_response_time,
                    MAX(message.msg_timestamp) AS last_response_time,
                    pull_requests.pr_closed_at,
                    pr_created_at,
                    pull_requests.pr_merged_at
                FROM pull_requests, repo, pull_request_message_ref, message
                WHERE repo.repo_id = :repo_id
                AND repo.repo_id = pull_requests.repo_id
                AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
                AND pull_request_message_ref.msg_id = message.msg_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id
            ) response_times
            GROUP BY closed_year, merged_status, response_times.first_response_time, response_times.last_response_time, response_times.pr_created_at, response_times.pr_closed_at
        """)
    # Unused bind parameters (repo_id or repo_group_id) are simply ignored
    # by the chosen query.
    pr_all = pd.read_sql(pr_all_SQL, self.database,
        params={'repo_id': repo_id, 'repo_group_id':repo_group_id,
        'begin_date': begin_date, 'end_date': end_date})
    # Average per time bucket (and per repo in the group variant), then
    # keep only the columns for the requested time_unit.
    if not repo_id:
        avg_pr_time_to_responses_and_close = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_to_first_response'.format(time_unit), 'average_{}_to_last_response'.format(time_unit), 'average_{}_to_close'.format(time_unit)]]
    else:
        avg_pr_time_to_responses_and_close = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_to_first_response'.format(time_unit), 'average_{}_to_last_response'.format(time_unit), 'average_{}_to_close'.format(time_unit)]]
    return avg_pr_time_to_responses_and_close
@register_metric()
def pull_request_merged_status_counts(self, repo_group_id, repo_id=None, begin_date='1970-1-1 00:00:01', end_date=None, group_by='month'):
    """ Merged status counts with time frames

    Returns one count per (merged_status, time bucket) — and per repo when
    queried at the repo-group level — by counting the per-PR rows in pandas.

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:01'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of merged status counts
    """
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Build 'closed_<unit>' grouping columns from 'year' down to the
    # requested group_by granularity (same trimming idiom as the other
    # PR metrics in this class). NOTE: the loop variable is named
    # time_unit but holds a unit *name* ('year', 'month', ...).
    unit_options = ['year', 'month', 'week', 'day']
    time_group_bys = []
    for time_unit in unit_options.copy():
        if group_by not in unit_options:
            continue
        time_group_bys.append('closed_{}'.format(time_unit))
        del unit_options[0]

    if not repo_id:
        # Repo-group variant: keep repo/repo-group identity columns.
        pr_all_sql = s.sql.text("""
            SELECT
                repo_id,
                repo_name,
                repo_group_id,
                repo_group_name,
                pull_request_id AS pull_request_count,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day
            FROM (
                SELECT
                    pull_requests.pull_request_id,
                    pull_requests.repo_id,
                    repo.repo_name,
                    repo_groups.repo_group_id,
                    rg_name AS repo_group_name,
                    pr_merged_at,
                    pr_closed_at
                FROM repo_groups,
                    pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
                WHERE pull_requests.repo_id IN
                    (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
                AND repo_groups.repo_group_id = repo.repo_group_id
                AND pr_created_at::DATE >= :begin_date ::DATE
                AND pr_closed_at::DATE <= :end_date ::DATE
                GROUP BY pull_requests.pull_request_id, pull_requests.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
            ) data
            GROUP BY repo_id, repo_name, repo_group_id, repo_group_name, pull_request_id, pr_merged_at, pr_closed_at
        """)
    else:
        # Single-repo variant: a flat per-PR listing; counting happens
        # in pandas below.
        pr_all_sql = s.sql.text("""
            SELECT
                pull_request_id as pull_request_count,
                CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' end as merged_status,
                date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
                date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
                date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
                date_part( 'day', pr_closed_at :: DATE ) AS closed_day
            from pull_requests
            where repo_id = :repo_id
            AND pr_created_at::date >= :begin_date ::date
            AND pr_closed_at::date <= :end_date ::date
        """)
    # Unused bind parameters are ignored by the chosen query.
    pr_all = pd.read_sql(pr_all_sql, self.database, params={'repo_group_id': repo_group_id,
        'repo_id': repo_id, 'begin_date': begin_date, 'end_date': end_date})
    # count() turns the per-PR 'pull_request_count' column into an actual
    # count per group.
    if not repo_id:
        pr_merged_counts = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).count().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['pull_request_count']]
    else:
        pr_merged_counts = pr_all.groupby(['merged_status'] + time_group_bys).count().reset_index()[time_group_bys + ['merged_status', 'pull_request_count']]
    return pr_merged_counts
| |
'''tzinfo timezone information for America/Moncton.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Moncton(DstTzInfo):
    '''America/Moncton timezone definition. See datetime.tzinfo for details

    NOTE: generated pytz zone data -- do not edit by hand; regenerate from
    the IANA/Olson tz database instead.
    '''

    # Olson zone name this class describes.
    zone = 'America/Moncton'

    # UTC instants at which the zone's offset/abbreviation changes. Each
    # entry pairs index-for-index with _transition_info below.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1902,6,15,5,0,0),
        d(1918,4,14,6,0,0),
        d(1918,10,31,5,0,0),
        d(1933,6,11,5,0,0),
        d(1933,9,10,4,0,0),
        d(1934,6,10,5,0,0),
        d(1934,9,9,4,0,0),
        d(1935,6,9,5,0,0),
        d(1935,9,8,4,0,0),
        d(1936,6,7,5,0,0),
        d(1936,9,6,4,0,0),
        d(1937,6,6,5,0,0),
        d(1937,9,5,4,0,0),
        d(1938,6,5,5,0,0),
        d(1938,9,4,4,0,0),
        d(1939,5,27,5,0,0),
        d(1939,9,23,4,0,0),
        d(1940,5,19,5,0,0),
        d(1940,9,21,4,0,0),
        d(1941,5,4,5,0,0),
        d(1941,9,27,4,0,0),
        d(1942,2,9,6,0,0),
        d(1945,8,14,23,0,0),
        d(1945,9,30,5,0,0),
        d(1946,4,28,6,0,0),
        d(1946,9,29,5,0,0),
        d(1947,4,27,6,0,0),
        d(1947,9,28,5,0,0),
        d(1948,4,25,6,0,0),
        d(1948,9,26,5,0,0),
        d(1949,4,24,6,0,0),
        d(1949,9,25,5,0,0),
        d(1950,4,30,6,0,0),
        d(1950,9,24,5,0,0),
        d(1951,4,29,6,0,0),
        d(1951,9,30,5,0,0),
        d(1952,4,27,6,0,0),
        d(1952,9,28,5,0,0),
        d(1953,4,26,6,0,0),
        d(1953,9,27,5,0,0),
        d(1954,4,25,6,0,0),
        d(1954,9,26,5,0,0),
        d(1955,4,24,6,0,0),
        d(1955,9,25,5,0,0),
        d(1956,4,29,6,0,0),
        d(1956,9,30,5,0,0),
        d(1957,4,28,6,0,0),
        d(1957,10,27,5,0,0),
        d(1958,4,27,6,0,0),
        d(1958,10,26,5,0,0),
        d(1959,4,26,6,0,0),
        d(1959,10,25,5,0,0),
        d(1960,4,24,6,0,0),
        d(1960,10,30,5,0,0),
        d(1961,4,30,6,0,0),
        d(1961,10,29,5,0,0),
        d(1962,4,29,6,0,0),
        d(1962,10,28,5,0,0),
        d(1963,4,28,6,0,0),
        d(1963,10,27,5,0,0),
        d(1964,4,26,6,0,0),
        d(1964,10,25,5,0,0),
        d(1965,4,25,6,0,0),
        d(1965,10,31,5,0,0),
        d(1966,4,24,6,0,0),
        d(1966,10,30,5,0,0),
        d(1967,4,30,6,0,0),
        d(1967,10,29,5,0,0),
        d(1968,4,28,6,0,0),
        d(1968,10,27,5,0,0),
        d(1969,4,27,6,0,0),
        d(1969,10,26,5,0,0),
        d(1970,4,26,6,0,0),
        d(1970,10,25,5,0,0),
        d(1971,4,25,6,0,0),
        d(1971,10,31,5,0,0),
        d(1972,4,30,6,0,0),
        d(1972,10,29,5,0,0),
        d(1974,4,28,6,0,0),
        d(1974,10,27,5,0,0),
        d(1975,4,27,6,0,0),
        d(1975,10,26,5,0,0),
        d(1976,4,25,6,0,0),
        d(1976,10,31,5,0,0),
        d(1977,4,24,6,0,0),
        d(1977,10,30,5,0,0),
        d(1978,4,30,6,0,0),
        d(1978,10,29,5,0,0),
        d(1979,4,29,6,0,0),
        d(1979,10,28,5,0,0),
        d(1980,4,27,6,0,0),
        d(1980,10,26,5,0,0),
        d(1981,4,26,6,0,0),
        d(1981,10,25,5,0,0),
        d(1982,4,25,6,0,0),
        d(1982,10,31,5,0,0),
        d(1983,4,24,6,0,0),
        d(1983,10,30,5,0,0),
        d(1984,4,29,6,0,0),
        d(1984,10,28,5,0,0),
        d(1985,4,28,6,0,0),
        d(1985,10,27,5,0,0),
        d(1986,4,27,6,0,0),
        d(1986,10,26,5,0,0),
        d(1987,4,5,6,0,0),
        d(1987,10,25,5,0,0),
        d(1988,4,3,6,0,0),
        d(1988,10,30,5,0,0),
        d(1989,4,2,6,0,0),
        d(1989,10,29,5,0,0),
        d(1990,4,1,6,0,0),
        d(1990,10,28,5,0,0),
        d(1991,4,7,6,0,0),
        d(1991,10,27,5,0,0),
        d(1992,4,5,6,0,0),
        d(1992,10,25,5,0,0),
        d(1993,4,4,4,1,0),
        d(1993,10,31,3,1,0),
        d(1994,4,3,4,1,0),
        d(1994,10,30,3,1,0),
        d(1995,4,2,4,1,0),
        d(1995,10,29,3,1,0),
        d(1996,4,7,4,1,0),
        d(1996,10,27,3,1,0),
        d(1997,4,6,4,1,0),
        d(1997,10,26,3,1,0),
        d(1998,4,5,4,1,0),
        d(1998,10,25,3,1,0),
        d(1999,4,4,4,1,0),
        d(1999,10,31,3,1,0),
        d(2000,4,2,4,1,0),
        d(2000,10,29,3,1,0),
        d(2001,4,1,4,1,0),
        d(2001,10,28,3,1,0),
        d(2002,4,7,4,1,0),
        d(2002,10,27,3,1,0),
        d(2003,4,6,4,1,0),
        d(2003,10,26,3,1,0),
        d(2004,4,4,4,1,0),
        d(2004,10,31,3,1,0),
        d(2005,4,3,4,1,0),
        d(2005,10,30,3,1,0),
        d(2006,4,2,4,1,0),
        d(2006,10,29,3,1,0),
        d(2007,3,11,6,0,0),
        d(2007,11,4,5,0,0),
        d(2008,3,9,6,0,0),
        d(2008,11,2,5,0,0),
        d(2009,3,8,6,0,0),
        d(2009,11,1,5,0,0),
        d(2010,3,14,6,0,0),
        d(2010,11,7,5,0,0),
        d(2011,3,13,6,0,0),
        d(2011,11,6,5,0,0),
        d(2012,3,11,6,0,0),
        d(2012,11,4,5,0,0),
        d(2013,3,10,6,0,0),
        d(2013,11,3,5,0,0),
        d(2014,3,9,6,0,0),
        d(2014,11,2,5,0,0),
        d(2015,3,8,6,0,0),
        d(2015,11,1,5,0,0),
        d(2016,3,13,6,0,0),
        d(2016,11,6,5,0,0),
        d(2017,3,12,6,0,0),
        d(2017,11,5,5,0,0),
        d(2018,3,11,6,0,0),
        d(2018,11,4,5,0,0),
        d(2019,3,10,6,0,0),
        d(2019,11,3,5,0,0),
        d(2020,3,8,6,0,0),
        d(2020,11,1,5,0,0),
        d(2021,3,14,6,0,0),
        d(2021,11,7,5,0,0),
        d(2022,3,13,6,0,0),
        d(2022,11,6,5,0,0),
        d(2023,3,12,6,0,0),
        d(2023,11,5,5,0,0),
        d(2024,3,10,6,0,0),
        d(2024,11,3,5,0,0),
        d(2025,3,9,6,0,0),
        d(2025,11,2,5,0,0),
        d(2026,3,8,6,0,0),
        d(2026,11,1,5,0,0),
        d(2027,3,14,6,0,0),
        d(2027,11,7,5,0,0),
        d(2028,3,12,6,0,0),
        d(2028,11,5,5,0,0),
        d(2029,3,11,6,0,0),
        d(2029,11,4,5,0,0),
        d(2030,3,10,6,0,0),
        d(2030,11,3,5,0,0),
        d(2031,3,9,6,0,0),
        d(2031,11,2,5,0,0),
        d(2032,3,14,6,0,0),
        d(2032,11,7,5,0,0),
        d(2033,3,13,6,0,0),
        d(2033,11,6,5,0,0),
        d(2034,3,12,6,0,0),
        d(2034,11,5,5,0,0),
        d(2035,3,11,6,0,0),
        d(2035,11,4,5,0,0),
        d(2036,3,9,6,0,0),
        d(2036,11,2,5,0,0),
        d(2037,3,8,6,0,0),
        d(2037,11,1,5,0,0),
        ]

    # (utcoffset seconds, dst seconds, tzname) in effect from the matching
    # transition time above: EST = UTC-5, AST = UTC-4, and ADT/AWT/APT =
    # UTC-3 with one hour of DST.
    _transition_info = [
        i(-18000,0,'EST'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'AWT'),
        i(-10800,3600,'APT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        i(-10800,3600,'ADT'),
        i(-14400,0,'AST'),
        ]
# Shadow the class with its singleton instance, as pytz zone modules do:
# the module-level name 'Moncton' is the ready-to-use tzinfo object.
Moncton = Moncton()
| |
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from functional_tests.base import FunctionalTest
from accounts.models import User
from accounts.tests.test_models import create_new_user
from accounts.tests.test_views import accounts_route_questionnaires
# @patch('wocat.views.generic_questionnaire_list')
# @patch.object(WocatAuthenticationBackend, 'authenticate')
# class LoginTest(FunctionalTest):
#
# def test_login(
# self, mock_authenticate, mock_get_user_id, mock_get_and_update,
# mock_questionnaire_list
# ):
#
# user = create_new_user()
#
# mock_get_and_update.return_value = user
# mock_authenticate.return_value = None
# mock_authenticate.__name__ = ''
# mock_get_user_id.return_value = user.id
# mock_questionnaire_list.return_value = {}
#
# # Alice opens her web browser and goes to the home page
# self.browser.get(self.live_server_url)
#
# # She sees the top navigation bar with the login button, on which she
# # clicks.
# navbar = self.findBy('class_name', 'top-bar')
# navbar.find_element_by_link_text('Login').click()
#
# # She tries to submit the form empty and sees that the form was
# # not submitted.
# self.findBy('id', 'button_login').click()
# self.findBy('name', 'username')
#
# # She enters some (wrong) user credentials
# self.findBy('name', 'username').send_keys('wrong@user.com')
# self.findBy('name', 'password').send_keys('wrong')
#
# # She tries to submit the form and sees an error message
# self.findBy('id', 'button_login').click()
# self.checkOnPage('Please enter a correct email address and password.')
#
# mock_authenticate.return_value = user
# self.browser.add_cookie({'name': 'fe_typo_user', 'value': 'session_id'})
#
# # She enters some (correct) user credentials
# self.findBy('name', 'password').send_keys('correct')
# self.findBy('id', 'button_login').click()
#
# # She sees that she was redirected to the landing page
# self.assertEqual(self.browser.current_url,
# self.live_server_url + reverse('wocat:home'))
# self.checkOnPage(user.get_display_name())
# self.checkOnPage('Logout')
class UserTest(FunctionalTest):
    """Check which admin navigation links each user role can see.

    Each test logs in as a user with a given role and asserts the
    presence/absence of the Django administration link and the
    search/Dashboard admin link in the top navigation dropdown.
    """

    fixtures = [
        'groups_permissions',
    ]

    # XPath of the Django administration link in the user dropdown.
    ADMIN_LINK_XPATH = '//ul[@class="dropdown"]/li/a[@href="/admin/"]'
    # XPath of the search/Dashboard admin link in the user dropdown.
    DASHBOARD_LINK_XPATH = (
        '//ul[@class="dropdown"]/li/a[contains(@href, "search/admin")]')

    def _login_and_check_nav_links(
            self, user, sees_administration, sees_dashboard):
        """Log in as ``user`` and assert admin link visibility.

        :param user: The user to log in as.
        :param sees_administration: Whether the /admin/ link must be shown.
        :param sees_dashboard: Whether the Dashboard link must be shown.
        """
        self.doLogin(user)
        admin_check = self.findBy if sees_administration else self.findByNot
        admin_check('xpath', self.ADMIN_LINK_XPATH)
        dashboard_check = self.findBy if sees_dashboard else self.findByNot
        dashboard_check('xpath', self.DASHBOARD_LINK_XPATH)

    def test_superusers(self):
        # Superusers see both the administration and the Dashboard links.
        user = create_new_user()
        user.is_superuser = True
        user.save()
        self._login_and_check_nav_links(
            user, sees_administration=True, sees_dashboard=True)

    def test_administrators(self):
        # Administrators (group pk=1) see the administration link but not
        # the Dashboard.
        user = create_new_user()
        user.groups = [Group.objects.get(pk=1)]
        self._login_and_check_nav_links(
            user, sees_administration=True, sees_dashboard=False)

    def test_moderators(self):
        # Moderators (group pk=3) see neither link.
        user = create_new_user()
        user.groups = [Group.objects.get(pk=3)]
        self._login_and_check_nav_links(
            user, sees_administration=False, sees_dashboard=False)

    def test_translators(self):
        # Translators (group pk=2) see the administration link but not
        # the Dashboard.
        user = create_new_user()
        user.groups = [Group.objects.get(pk=2)]
        self._login_and_check_nav_links(
            user, sees_administration=True, sees_dashboard=False)
# @patch('accounts.authentication.WocatAuthenticationBackend._do_auth')
# class LogoutTest(FunctionalTest):
# def test_logout(self, mock_do_auth):
# mock_do_auth.return_value = ('tempsessionid')
# # Alice logs in
# self.doLogin('a@b.com', 'foo')
# # She sees a logout button in the top navigation bar and clicks on it
# navbar = self.findBy('class_name', 'top-bar')
# navbar.find_element_by_link_text('Logout').click()
# # She notices she was redirected to the home page and is now logged
# # out (the top bar showing a login button)
# self.assertEqual(self.browser.current_url, self.live_server_url + '/')
# navbar = self.findBy('class_name', 'top-bar')
# navbar.find_element_by_link_text('Login')
class ModerationTest(FunctionalTest):
    """Check which questionnaires each role sees in the 'my questionnaires'
    list view (order of the Selenium assertions mirrors the expected list
    order, so the steps below must not be reordered)."""

    fixtures = [
        'groups_permissions',
        'global_key_values',
        'sample',
        'sample_questionnaire_status',
        'sample_user',
    ]

    def test_user_questionnaires(self):
        # Users from the sample_user fixture: a plain compiler, a
        # moderator and a WOCAT-secretariat member.
        user_alice = User.objects.get(pk=101)
        user_moderator = User.objects.get(pk=103)
        user_secretariat = User.objects.get(pk=107)

        # Alice logs in
        self.doLogin(user=user_alice)

        # She logs in as moderator and sees that she can access the view
        self.doLogin(user=user_moderator)
        self.browser.get(self.live_server_url + reverse(
            accounts_route_questionnaires))
        # Wait until the AJAX spinner disappears before asserting on the
        # rendered list.
        self.wait_for(
            'xpath', '//img[@src="/static/assets/img/ajax-loader.gif"]',
            visibility=False)

        # The moderator sees all submitted questionnaires plus the one
        # she compiled herself, in this exact order.
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[1]//a['
            'contains(text(), "Foo 6")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[2]//a['
            'contains(text(), "Foo 2")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[3]//a['
            'contains(text(), "Foo 8")]',
            wait=True)
        list_entries = self.findManyBy(
            'xpath', '//article[contains(@class, "tech-item")]')
        self.assertEqual(len(list_entries), 3)

        # Log in as WOCAT secretariat and open the same view.
        self.doLogin(user=user_secretariat)
        self.browser.get(self.live_server_url + reverse(
            accounts_route_questionnaires))
        self.wait_for(
            'xpath', '//img[@src="/static/assets/img/ajax-loader.gif"]',
            visibility=False)

        # The secretariat member sees all the questionnaires (2 drafts,
        # 2 submitted, 2 reviewed and 1 rejected), in this exact order.
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[1]//a['
            'contains(text(), "Foo 1")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[2]//a['
            'contains(text(), "Foo 6")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[3]//a['
            'contains(text(), "Foo 2")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[4]//a['
            'contains(text(), "Foo 8")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[5]//a['
            'contains(text(), "Foo 7")]',
            wait=True)
        self.findBy(
            'xpath', '(//article[contains(@class, "tech-item")])[6]//a['
            'contains(text(), "Foo 9")]',
            wait=True)
        list_entries = self.findManyBy(
            'xpath', '//article[contains(@class, "tech-item")]')
        self.assertEqual(len(list_entries), 6)
| |
# Copyright (C) 2015 Cisco, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author(s): Julian Edwards, Raphael Badin
"""A fixture that creates a neutron network in Openstack."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
"NeutronNetworkFixture",
"RouterFixture",
"SecurityGroupRuleFixture",
]
from copy import copy
import fixtures
from testiny.clients import get_neutron_client
from testiny.config import CONF
from testiny.utils import synchronized
from testiny.factory import factory
from testiny.utils import wait_until
from testtools.content import text_content
SUBNET_ID_MIN = 11
SUBNET_ID_MAX = 254
class NeutronNetworkFixture(fixtures.Fixture):
    """Test fixture that creates a randomly-named neutron network.

    The name is available as the 'name' property after creation.
    """
    # A record of the available subnet ids. Class-level and therefore
    # shared by every instance; accessed only through the @synchronized
    # classmethods below so concurrent fixtures never hand out the same
    # subnet id.
    available_subnet_ids = set(
        range(SUBNET_ID_MAX, SUBNET_ID_MIN - 1, -1))

    def __init__(self, project_fixture):
        super(NeutronNetworkFixture, self).__init__()
        # The project (tenant) fixture whose admin credentials are used
        # for all neutron calls.
        self.project_fixture = project_fixture

    @classmethod
    @synchronized
    def get_subnet_id(cls):
        """Get an available subnet id."""
        # pop() raises KeyError when the pool is exhausted.
        return cls.available_subnet_ids.pop()

    @classmethod
    @synchronized
    def release_subnet_id(cls, id):
        """Release the given subnet id."""
        cls.available_subnet_ids.add(id)

    def _setUp(self):
        """Create the network and one subnet; register cleanup."""
        super(NeutronNetworkFixture, self)._setUp()
        self.neutron = get_neutron_client(
            project_name=self.project_fixture.name,
            user_name=self.project_fixture.admin_user.name,
            password=self.project_fixture.admin_user_fixture.password)
        self.subnet_id = self.get_subnet_id()
        # CONF.network['cidr'] is a template with a '{subnet}' placeholder,
        # e.g. '10.0.{subnet}.0/24'.
        cidr = CONF.network['cidr'].format(subnet=self.subnet_id)
        # TODO: handle clashes and retry.
        self.net_name = factory.make_obj_name("network")
        self.sub_name = factory.make_obj_name("subnet")
        self.network = self.neutron.create_network(
            {"network": dict(name=self.net_name)})
        network_id = self.network["network"]["id"]
        self.subnet = self.neutron.create_subnet(
            {"subnet": dict(
                name=self.sub_name, network_id=network_id, cidr=cidr,
                ip_version=4)})
        # Registered only after both creates succeeded; delete_network
        # tears down subnet, network and the pooled id together.
        self.addCleanup(self.delete_network)
        self.addDetail(
            'NeutronNetworkFixture-network',
            text_content('Network %s created' % self.net_name))
        self.addDetail(
            'NeutronNetworkFixture-subnet',
            text_content('Subnet %s created (cidr=%s)' % (
                self.sub_name, cidr)))

    def delete_network(self):
        """Delete the subnet, then the network, then recycle the id."""
        self.neutron.delete_subnet(self.subnet["subnet"]["id"])
        self.neutron.delete_network(self.network["network"]["id"])
        self.release_subnet_id(self.subnet_id)

    def get_network(self, network_name):
        """Fetch network object given its network name.

        Can be used to return networks other than the fixture's in the
        context of the project, e.g. external networks.

        Returns None if not found.
        """
        networks = self.neutron.list_networks(name=network_name)['networks']
        return networks[0] if len(networks) == 1 else None

    def get_external_gateway_ip(self, subnet_index=0):
        """Return the gateway IP of a subnet in the public network."""
        # NOTE(review): assumes the configured external network exists;
        # get_network returns None otherwise and the subscript below would
        # raise TypeError.
        external_network = self.get_network(CONF.network['external_network'])
        subnets = self.neutron.list_subnets(
            network_id=external_network['id'])['subnets']
        return subnets[subnet_index]['gateway_ip']
class RouterFixture(fixtures.Fixture):
    """Test fixture that creates a randomly-named neutron router.

    The name is available as the 'name' property after creation.
    """
    def __init__(self, project_fixture):
        super(RouterFixture, self).__init__()
        self.project_fixture = project_fixture
        # Subnet ids currently attached via add_interface_router().
        self.subnet_ids = []

    def setUp(self):
        """Create the router, register cleanup and wait until ACTIVE."""
        super(RouterFixture, self).setUp()
        self.neutron = get_neutron_client(
            project_name=self.project_fixture.name,
            user_name=self.project_fixture.admin_user.name,
            password=self.project_fixture.admin_user_fixture.password)
        # TODO: handle clashes and retry.
        self.name = factory.make_obj_name("router")
        self.router = self.neutron.create_router(
            {'router': {'name': self.name, 'admin_state_up': True}})
        self.addCleanup(self.delete_router)
        self.addDetail(
            'RouterFixture-network',
            text_content('Router %s created' % self.name))
        self.wait_until_active()

    def wait_until_active(self):
        """Block until neutron reports the router status as ACTIVE."""
        wait_until(lambda: self.refresh()['router']['status'] == 'ACTIVE')

    def refresh(self):
        """Refresh and return the self.router object.

        Leaves self.router untouched when the lookup does not yield
        exactly one router (e.g. it was deleted concurrently).
        """
        # Bug fix: the previous check also tested len() of the *response
        # dict* (its key count), which says nothing about how many routers
        # matched; only the 'routers' list is meaningful.
        matches = self.neutron.list_routers(
            id=self.router['router']['id']).get('routers', [])
        if len(matches) == 1:
            self.router['router'] = matches[0]
        return self.router

    def add_interface_router(self, subnet_id):
        """Attach the given subnet to the router and track it."""
        self.neutron.add_interface_router(
            self.router["router"]["id"], {'subnet_id': subnet_id})
        self.subnet_ids.append(subnet_id)

    def remove_interface_router(self, subnet_id):
        """Detach the given subnet from the router and untrack it."""
        self.neutron.remove_interface_router(
            self.router["router"]["id"], {'subnet_id': subnet_id})
        self.subnet_ids.remove(subnet_id)

    def add_gateway_router(self, network_id):
        """Set the router's external gateway to the given network."""
        self.neutron.add_gateway_router(
            self.router["router"]["id"], {'network_id': network_id})

    def remove_gateway_router(self):
        """Clear the router's external gateway."""
        self.neutron.remove_gateway_router(
            self.router["router"]["id"])

    def delete_router(self):
        """Tear down interfaces, gateway and the router, in that order."""
        # Delete interfaces first.
        # Make a copy of the list since it's amended by
        # remove_interface_router as the IDs become available again.
        for subnet_id in copy(self.subnet_ids):
            self.remove_interface_router(subnet_id)
        # Clear gateway.
        # NOTE(review): called unconditionally even if no gateway was ever
        # set -- confirm neutron treats that as a no-op for these tests.
        self.remove_gateway_router()
        # Delete router.
        self.neutron.delete_router(self.router["router"]["id"])
        self.addDetail(
            'RouterFixture-network',
            text_content('Router %s deleted' % self.name))
class SecurityGroupRuleFixture(fixtures.Fixture):
    """Test fixture that creates a security group rule.

    This assumes the security group already exists.
    """
    def __init__(self, project_fixture, security_group_name, direction,
                 protocol, port_range_min=None, port_range_max=None):
        super(SecurityGroupRuleFixture, self).__init__()
        self.project_fixture = project_fixture
        self.security_group_name = security_group_name
        self.direction = direction
        self.protocol = protocol
        self.port_range_min = port_range_min
        self.port_range_max = port_range_max

    def setUp(self):
        """Look up the target security group and create the rule on it."""
        super(SecurityGroupRuleFixture, self).setUp()
        self.neutron = get_neutron_client(
            project_name=self.project_fixture.name,
            user_name=self.project_fixture.admin_user.name,
            password=self.project_fixture.admin_user_fixture.password)
        self.load_security_group()
        # Assemble the rule payload separately for readability.
        rule_spec = {
            'direction': self.direction,
            'security_group_id': self.security_group['id'],
            'protocol': self.protocol,
            'port_range_max': self.port_range_max,
            'port_range_min': self.port_range_min,
        }
        self.security_group_rule = self.neutron.create_security_group_rule(
            {'security_group_rule': rule_spec})
        created_group_id = (
            self.security_group_rule
            ['security_group_rule']
            ['security_group_id'])
        self.addDetail(
            'SecurityGroupRuleFixture-network',
            text_content(
                'Security group rule %s created' % created_group_id))
        self.addCleanup(self.delete_security_group_rule)

    def load_security_group(self):
        """Find the named security group in this project.

        Stores it on self.security_group; raises when zero or several
        groups match.
        """
        candidates = self.neutron.list_security_groups()['security_groups']
        matches = []
        for candidate in candidates:
            in_project = (
                candidate['tenant_id'] == self.project_fixture.project.id)
            if in_project and candidate['name'] == self.security_group_name:
                matches.append(candidate)
        if len(matches) != 1:
            raise Exception(
                "Can't find security group named '%s'" %
                self.security_group_name)
        self.security_group = matches[0]

    def delete_security_group_rule(self):
        """Delete the rule created by setUp and record the teardown."""
        rule_id = self.security_group_rule['security_group_rule']['id']
        self.neutron.delete_security_group_rule(rule_id)
        self.addDetail(
            'SecurityGroupRuleFixture-network',
            text_content(
                'Security group rule %s deleted' %
                self.security_group_rule))
| |
"""
The json module introduces JSON resource and JSON parsing.
"""
from enum import unique, Enum
from functools import lru_cache
import json
from .resource import Resource
from ..ecore import EObject, EProxy, ECollection, EClass, EEnumLiteral
@unique
class JsonOptions(Enum):
    """Options accepted by JsonResource.save() via its ``options`` dict."""
    # When set (truthy), features still at their default value are serialized.
    SERIALIZE_DEFAULT_VALUES = 0
# Sentinel a mapper can return to mean "omit this value from the output"
# (checked with ``is`` in JsonResource.to_dict / DefaultObjectMapper).
NO_OBJECT = object()
class JsonResource(Resource):
    """A Resource that loads and saves a model from/to JSON.

    Cross-references are emitted under ``ref_tag`` (default ``'$ref'``) and
    resolved lazily through EProxy objects on load.
    """

    def __init__(self, uri=None, use_uuid=False, indent=None, ref_tag='$ref'):
        super().__init__(uri, use_uuid)
        self._resolve_later = []
        # inst -> [(feature, raw value)] of non-containment references that
        # can only be resolved once the whole document has been read.
        self._load_href = {}
        self.indent = indent      # indent forwarded to json.dumps on save
        self.ref_tag = ref_tag    # JSON key that marks a reference
        self.mappers = {}         # python class -> custom serializer
        self.default_mapper = DefaultObjectMapper()

    def load(self, options=None):
        """Parse the JSON stream of self.uri and build the object graph."""
        self.cache_enabled = True
        json_value = self.uri.create_instream()
        d = json.loads(json_value.read().decode('utf-8'))
        # The document may contain one root object or a list of roots.
        if isinstance(d, list):
            for x in d:
                self.to_obj(x, first=True)
        else:
            self.to_obj(d, first=True)
        self.uri.close_stream()
        # Deferred cross-references are applied once every instance exists.
        for inst, refs in self._load_href.items():
            self.process_inst(inst, refs)
        self._load_href.clear()
        self._feature_cache.clear()
        self.cache_enabled = False

    def save(self, output=None, options=None):
        """Serialize the resource contents as JSON to *output* (or self.uri)."""
        self.options = options or {}
        stream = self.open_out_stream(output)
        dict_list = []
        for root in self.contents:
            dict_list.append(self.to_dict(root))
        # A single root is written as an object, several roots as an array.
        # Bug fix: the previous '<= 1' raised IndexError on an empty resource;
        # an empty resource now serializes as '[]'.
        if len(dict_list) == 1:
            dict_list = dict_list[0]
        stream.write(json.dumps(dict_list, indent=self.indent)
                     .encode('utf-8'))
        stream.flush()
        self.uri.close_stream()
        self.options = None

    def _uri_fragment(self, obj):
        """Fragment identifying *obj*: UUID if the owning resource uses
        UUIDs, otherwise the structural URI fragment."""
        if obj.eResource == self:
            use_id = self.use_uuid
        else:
            use_id = obj.eResource and obj.eResource.use_uuid
        if use_id:
            self._assign_uuid(obj)
            return obj._internal_id
        else:
            return obj.eURIFragment()

    @staticmethod
    def serialize_eclass(eclass):
        """Full URI (nsURI + fragment) identifying a metaclass."""
        return '{}{}'.format(eclass.eRoot().nsURI, eclass.eURIFragment())

    def register_mapper(self, eclass, mapper_class):
        """Register a custom mapper for *eclass* (or its python class)."""
        if hasattr(eclass, 'python_class'):
            eclass = eclass.python_class
        self.mappers[eclass] = mapper_class

    def object_uri(self, obj):
        """URI of *obj*, prefixed with its resource URI when external."""
        if obj.eResource == self:
            resource_uri = ''
        else:
            resource_uri = obj.eResource.uri if obj.eResource else ''
        return '{}{}'.format(resource_uri, self._uri_fragment(obj))

    def _to_ref_from_obj(self, obj, opts=None, use_uuid=None, resource=None):
        """Dict representing a reference to *obj* rather than its content."""
        uri = self.serialize_eclass(obj.eClass)
        ref = {'eClass': uri}
        ref[self.ref_tag] = self.object_uri(obj)
        return ref

    def to_dict(self, obj, is_ref=False):
        """Convert *obj* (EObject, collection or plain value) to JSON-ready data."""
        if isinstance(obj, type) and issubclass(obj, EObject):
            if is_ref:
                fun = self._to_ref_from_obj
            # NOTE(review): if is_ref is False here, 'fun' is unbound — the
            # disabled 'else' below used to assign it.  Confirm metaclass
            # values are only ever serialized as references.
            return fun(obj.eClass, self.options, self.use_uuid, self)
            # else:
            #     cls = obj.python_class
            #     mapper = next((self.mappers[k] for k in self.mappers
            #                    if issubclass(cls, k)), self.default_mapper)
            #     fun = mapper.to_dict_from_obj
        elif isinstance(obj, EEnumLiteral):
            # Enum literals serialize as their bare name.
            return obj.name
        elif isinstance(obj, EObject):
            if is_ref:
                fun = self._to_ref_from_obj
            else:
                cls = obj.eClass.python_class
                mapper = next((self.mappers[k] for k in self.mappers
                               if issubclass(cls, k)), self.default_mapper)
                fun = mapper.to_dict_from_obj
            return fun(obj, self.options, self.use_uuid, self)
        elif isinstance(obj, ECollection):
            fun = self._to_ref_from_obj if is_ref else self.to_dict
            result = []
            for x in obj:
                write_object = fun(x)
                if write_object is NO_OBJECT:
                    continue
                result.append(write_object)
            return result
        else:
            # Plain attribute value: let json.dumps handle it.
            return obj

    @lru_cache()
    def resolve_eclass(self, uri_eclass):
        # NOTE(review): lru_cache on a method keeps 'self' alive for the
        # cache's lifetime; presumably acceptable for short-lived resources.
        return self.resolve_object(uri_eclass)

    def to_obj(self, d, owning_feature=None, first=False):
        """Build an EObject from dict *d*; reference dicts become EProxy."""
        is_ref = self.ref_tag in d
        if is_ref:
            return EProxy(path=d[self.ref_tag], resource=self)
        excludes = ['eClass', self.ref_tag, 'uuid']
        if 'eClass' in d:
            uri_eclass = d['eClass']
            eclass = self.resolve_eclass(uri_eclass)
        else:
            # Implicit type: use the declared type of the owning feature.
            # Bug fix: uri_eclass was previously unbound in this branch,
            # turning the ValueError below into a NameError.
            uri_eclass = None
            eclass = owning_feature._eType
        if not eclass:
            raise ValueError('Unknown metaclass for uri "{}"'
                             .format(uri_eclass))
        if eclass in (EClass.eClass, EClass):
            # EClass instances require their name at construction time.
            inst = eclass(d['name'])
            excludes.append('name')
        else:
            inst = eclass()
        if first:
            # The first decoded object decides whether UUIDs are in use
            # and becomes a root of the resource.
            self.use_uuid = 'uuid' in d
            self.append(inst)
        if self.use_uuid:
            self.uuid_dict[d['uuid']] = inst
        eattributes = []
        containments = []
        ereferences = []
        eclass = inst.eClass
        for key, value in d.items():
            if key in excludes:
                continue
            feature = self._find_feature(eclass, key)
            if not feature:
                raise ValueError('Unknown feature {} for object "{}"'
                                 .format(key, eclass))
            if feature.is_attribute:
                eattributes.append((feature, value))
            else:
                if feature.containment:
                    containments.append((feature, value))
                elif feature.eOpposite is not owning_feature:
                    ereferences.append((feature, value))
        # Attributes and containments are applied now; plain references are
        # deferred until the whole document has been read (see load()).
        self.process_inst(inst, eattributes)
        self.process_inst(inst, containments, owning_feature)
        self._load_href[inst] = ereferences
        return inst

    def process_inst(self, inst, features, owning_feature=None):
        """Assign (feature, raw JSON value) pairs onto *inst*."""
        for feature, value in features:
            if isinstance(value, dict):
                element = self.to_obj(value, owning_feature=feature)
                inst.eSet(feature, element)
            elif isinstance(value, list):
                if feature.is_reference:
                    elements = [self.to_obj(x, owning_feature=feature)
                                for x in value]
                    elements = [x for x in elements if x is not None]
                else:
                    elements = [feature._eType.from_string(x) for x in value]
                inst.eGet(feature).extend(elements)
            elif isinstance(value, str):
                inst.eSet(feature, feature._eType.from_string(value))
            else:
                inst.eSet(feature, value)
class DefaultObjectMapper(object):
    """Default EObject -> dict serializer used by JsonResource.to_dict()."""
    def to_dict_from_obj(self, obj, options, use_uuid, resource):
        """Serialize *obj* into a plain dict ready for json.dumps.

        Only explicitly-set, non-derived, non-transient features are written;
        references that are not containments are emitted as reference dicts.
        """
        d = {}
        containingFeature = obj.eContainmentFeature()
        # 'eClass' is only written when the type cannot be inferred from the
        # containing feature's declared type.
        if not containingFeature or obj.eClass is not containingFeature._eType:
            uri = resource.serialize_eclass(obj.eClass)
            d['eClass'] = uri
        for attr in obj._isset:
            if attr.derived or attr.transient:
                continue
            is_ereference = attr.is_reference
            # Non-containment references serialize as '$ref' dicts.
            is_ref = is_ereference and not attr.containment
            if is_ereference and attr.eOpposite:
                # Skip the back-pointer to the container.
                if attr.eOpposite is containingFeature:
                    continue
            value = obj.eGet(attr)
            serialize_default_option = JsonOptions.SERIALIZE_DEFAULT_VALUES
            # Values equal to the feature default are omitted unless the
            # SERIALIZE_DEFAULT_VALUES option was requested.
            if (not options.get(serialize_default_option, False)
                    and value == attr.get_default_value()):
                continue
            write_object = resource.to_dict(value, is_ref=is_ref)
            if write_object is not NO_OBJECT:
                d[attr.name] = write_object
        if use_uuid:
            resource._assign_uuid(obj)
            d['uuid'] = obj._internal_id
        return d
| |
from __future__ import unicode_literals, division, absolute_import
import functools
from collections import Mapping
from datetime import datetime
from sqlalchemy import extract, func
from sqlalchemy.orm import synonym
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from flexget.manager import Session
from flexget.utils import qualities
def with_session(*args, **kwargs):
    """
    A decorator which creates a new session if one was not passed via keyword argument to the function.

    Automatically commits and closes the session if one was created, caller is responsible for commit if passed in.

    If arguments are given when used as a decorator, they will automatically be passed to the created Session when
    one is not supplied.
    """
    def decorator(func):
        # functools.wraps preserves the wrapped function's name and docstring
        # (the plain wrapper previously clobbered them).
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if kwargs.get('session'):
                # Caller supplied a session: use it, caller manages commit.
                return func(*args, **kwargs)
            with _Session() as session:
                kwargs['session'] = session
                return func(*args, **kwargs)
        return wrapper

    if len(args) == 1 and not kwargs and callable(args[0]):
        # Used without arguments, e.g. @with_session
        # We default to expire_on_commit being false, in case the decorated function returns db instances
        _Session = functools.partial(Session, expire_on_commit=False)
        return decorator(args[0])
    else:
        # Arguments were specified, turn them into arguments for Session creation e.g. @with_session(autocommit=True)
        _Session = functools.partial(Session, *args, **kwargs)
        return decorator
def pipe_list_synonym(name):
    """Synonym exposing a pipe-separated text column as a list."""
    def getter(self):
        raw = getattr(self, name)
        if raw:
            return raw.strip('|').split('|')

    def setter(self, value):
        # Strings are stored verbatim; iterables are joined with '|'.
        if not isinstance(value, basestring):
            value = '|'.join(value)
        setattr(self, name, value)

    return synonym(name, descriptor=property(getter, setter))
def text_date_synonym(name):
    """Synonym converting 'Y-M-D' date strings into datetime objects on set."""
    def getter(self):
        return getattr(self, name)

    def setter(self, value):
        if not isinstance(value, basestring):
            # Already a datetime (or None): store as-is.
            setattr(self, name, value)
            return
        try:
            parsed = datetime.strptime(value, '%Y-%m-%d')
        except ValueError:
            # Invalid date string given, set to None
            parsed = None
        setattr(self, name, parsed)

    return synonym(name, descriptor=property(getter, setter))
def safe_pickle_synonym(name):
    """Used to store Entry instances into a PickleType column in the database.

    In order to ensure everything can be loaded after code changes, makes sure no custom python classes are pickled.
    """
    def only_builtins(item):
        """Casts all subclasses of builtin types to their builtin python type. Works recursively on iterables.

        Raises TypeError if passed an object that doesn't subclass a builtin type.
        """
        supported_types = [str, unicode, int, float, long, bool, datetime]
        # dict, list, tuple and set are also supported, but handled separately
        if type(item) in supported_types:
            return item
        elif isinstance(item, Mapping):
            result = {}
            for key, value in item.iteritems():
                try:
                    result[key] = only_builtins(value)
                except TypeError:
                    # Unsupported value: drop the key entirely.
                    continue
            return result
        elif isinstance(item, (list, tuple, set)):
            result = []
            for value in item:
                try:
                    result.append(only_builtins(value))
                # Bug fix: the recursive call raises TypeError (as the dict
                # branch above expects); catching ValueError here meant
                # unsupported sequence items propagated instead of being
                # skipped.
                except TypeError:
                    continue
            if isinstance(item, list):
                return result
            elif isinstance(item, tuple):
                return tuple(result)
            else:
                return set(result)
        else:
            # Subclass of a builtin: cast down to the plain builtin type.
            for s_type in supported_types:
                if isinstance(item, s_type):
                    return s_type(item)
        # If item isn't a subclass of a builtin python type, raise TypeError.
        raise TypeError('%r is not a subclass of a builtin python type.' % type(item))

    def getter(self):
        return getattr(self, name)

    def setter(self, entry):
        setattr(self, name, only_builtins(entry))

    return synonym(name, descriptor=property(getter, setter))
class CaseInsensitiveWord(Comparator):
    """Hybrid value representing a string that compares case insensitively."""

    def __init__(self, word):
        # Unwrap nested CaseInsensitiveWord instances.
        self.word = word.word if isinstance(word, CaseInsensitiveWord) else word

    def lower(self):
        # Python strings lower in-process; column clauses lower in SQL.
        if isinstance(self.word, basestring):
            return self.word.lower()
        return func.lower(self.word)

    def operate(self, op, other):
        if not isinstance(other, CaseInsensitiveWord):
            other = CaseInsensitiveWord(other)
        return op(self.lower(), other.lower())

    def __clause_element__(self):
        return self.lower()

    def __str__(self):
        return self.word

    def __getattr__(self, item):
        """Expose string methods to be called directly on this object."""
        return getattr(self.word, item)
def quality_property(text_attr):
    """Hybrid property exposing the text column *text_attr* as a Quality."""
    def getter(self):
        return qualities.Quality(getattr(self, text_attr))
    def setter(self, value):
        # Accepts either a raw quality string or a Quality instance.
        if isinstance(value, basestring):
            setattr(self, text_attr, value)
        else:
            setattr(self, text_attr, value.name)
    class QualComparator(Comparator):
        # SQL-side comparisons accept Quality objects by comparing names.
        def operate(self, op, other):
            if isinstance(other, qualities.Quality):
                other = other.name
            return op(self.__clause_element__(), other)
    def comparator(self):
        return QualComparator(getattr(self, text_attr))
    prop = hybrid_property(getter, setter)
    # NOTE(review): relies on old SQLAlchemy where hybrid_property.comparator()
    # mutated `prop` in place; modern SQLAlchemy returns a copy, which would
    # make this call a no-op — confirm the pinned SQLAlchemy version.
    prop.comparator(comparator)
    return prop
def quality_requirement_property(text_attr):
    """Hybrid property exposing *text_attr* as a qualities.Requirements."""
    def getter(self):
        return qualities.Requirements(getattr(self, text_attr))

    def setter(self, value):
        # Accepts either a raw requirements string or a Requirements object.
        text = value if isinstance(value, basestring) else value.text
        setattr(self, text_attr, text)

    return hybrid_property(getter, setter)
def ignore_case_property(text_attr):
    """Hybrid property wrapping *text_attr* in CaseInsensitiveWord on read."""
    return hybrid_property(
        lambda self: CaseInsensitiveWord(getattr(self, text_attr)),
        lambda self, value: setattr(self, text_attr, value))
def year_property(date_attr):
    """Hybrid property exposing the year of the date column *date_attr*."""
    def getter(self):
        value = getattr(self, date_attr)
        if value:
            return value.year
        return value

    def expr(cls):
        # SQL side: EXTRACT(year FROM column)
        return extract('year', getattr(cls, date_attr))

    return hybrid_property(getter, expr=expr)
| |
#!/usr/bin/env python
import logging
import exifread
import os
import shutil
import hashlib
import sys
import time
import datetime
import ntpath
import glob
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
# create the module-wide logger: one timestamped log file per run plus console
logger = logging.getLogger('moveExifFiles')
logger.setLevel(logging.INFO)
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
# NOTE(review): log directory is hard-coded; the script fails at import time
# if /home/storage/exif-logs does not exist — confirm for the target machine.
fileHandler = logging.FileHandler("{0}/{1}.log".format("/home/storage/exif-logs", datetime.datetime.now().isoformat()))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
class InputArguments(object):
    """Parses and validates the two positional CLI arguments.

    argv[1] is the source directory (processed recursively), argv[2] the
    destination; the destination is created when missing.
    """
    # create logger
    logger = logging.getLogger('InputArguments')
    logger.setLevel(logging.INFO)

    def __init__(self, argv):
        if len(argv) < 3:
            print("Usage: moveExifFiles.py <directory to process> <destination directory>")
            exit(1)
        self.dirname = argv[1]
        self.destdirname = argv[2]
        logger.info("Using source directory (recursive): {}".format(self.dirname))
        logger.info("Using destination directory: {}".format(self.destdirname))
        # Consistency fix: the py2 'print x' statements below were converted to
        # the print(...) form already used elsewhere in this file (identical
        # output for a single argument on Python 2).
        if not self.dirname:
            print("source directory cannot be empty!")
            exit(1)
        if not self.destdirname:
            print("destination directory cannot be empty!")
            exit(1)
        if not os.path.isdir(self.dirname):
            print("source directory {} does not exist!".format(self.dirname))
            exit(1)
        if not os.path.isdir(self.destdirname):
            self.logger.info("destination directory {} does not exist, creating it...".format(self.destdirname))
            # Bug fix: report creation only after makedirs actually succeeded.
            os.makedirs(self.destdirname)
            print("Created dir {}".format(self.destdirname))

    def abs_src_path(self, filename):
        """Path of *filename* under the source directory."""
        return self.dirname + os.path.sep + filename

    def abs_dest_path(self, filename):
        """Path of *filename* under the destination directory."""
        return self.destdirname + os.path.sep + filename
class ExifFileReader(object):
    """Reads the EXIF DateTimeOriginal tag from an image file."""
    # create logger
    logger = logging.getLogger('ExifFileReader')
    logger.setLevel(logging.WARNING)

    def __init__(self, filename):
        self.filename = filename

    def readExifDate(self):
        """Return the EXIF original date as 'YYYY-mm-dd', or '' if unavailable."""
        date = ""
        # open file, do EXIF stuff
        with open(self.filename, 'rb') as handle:
            tags = exifread.process_file(handle)
        self.logger.debug(tags)
        if not tags:
            self.logger.debug("no EXIF in file {}".format(self.filename))
            return date
        exifDateTimeField = tags.get('EXIF DateTimeOriginal', "")
        if not exifDateTimeField:
            self.logger.debug("no EXIF DateTimeOriginal field in the EXIF data")
            return date
        self.logger.debug(self.filename + " - " + str(exifDateTimeField))
        # 'YYYY:MM:DD HH:MM:SS' -> keep the date part with dashes
        date = str(exifDateTimeField).replace(":", "-").split()[0]
        self.logger.debug(date)
        return date
class Stats(object):
    """Accumulates per-key and overall counts of file operations."""
    # create logger
    logger = logging.getLogger('Stats')
    logger.setLevel(logging.WARNING)

    def __init__(self):
        self.stats = {}  # key (directory or path) -> {operation name: count}
        self.total = {}  # operation name -> overall count

    def report(self, key, name):
        """Record one occurrence of operation *name* under *key*."""
        dirStats = self.stats.get(key, {})
        dirStats[name] = dirStats.get(name, 0) + 1
        self.stats[key] = dirStats
        self.total[name] = self.total.get(name, 0) + 1

    def __repr__(self):
        """Human-readable summary of all recorded counts."""
        # Renamed the accumulator from 'str' to avoid shadowing the builtin.
        parts = []
        if self.stats:
            parts.append("\nSummary: \n")
            for cdir, cdirStats in self.stats.items():
                parts.append("\nDirectory {}".format(cdir))
                for op, count in cdirStats.items():
                    parts.append("\t{} files {}".format(count, op))
            for op, count in self.total.items():
                parts.append("\n\nTotal {} files {}".format(count, op))
        else:
            parts.append("\nNo stats were updated.")
        return ''.join(parts)
def create_directories(args, date):
    """Ensure a destination directory for *date* ('YYYY-MM-DD') and return it.

    Looks for existing directories starting with <dest>/<year>/<date>; when
    several match, the one with the longest name is preferred (it probably
    carries an extra description). Creates the directory if none exists.
    """
    # get year from date
    year = date.split("-")[0]
    dst_dir_path = args.abs_dest_path(year + os.path.sep + date)
    logger.debug("searching for directories with date {} ...".format(date))
    simdirs = glob.glob("{}*".format(dst_dir_path))
    # check if list is not empty
    if simdirs:
        logger.debug("found dirs: {}".format(simdirs))
        # Bug fix: sorted() returns a new list and its result was discarded,
        # so the longest-named directory was never actually chosen.
        simdirs.sort(key=len, reverse=True)
        dst_dir_path = simdirs[0]
    else:
        logger.debug("no directories exist.")
    # check if dir with date exists, if not create it
    if not os.path.isdir(dst_dir_path):
        logger.info("Destination directory '{}' does not exist, creating it...".format(dst_dir_path))
        os.makedirs(dst_dir_path)
        print("Created dir {}".format(dst_dir_path))
    else:
        logger.info("Destination directory '{}' already exists. Using it.".format(dst_dir_path))
    return dst_dir_path
def handle_file(args, stats, file):
    """Move *file* into a date-named destination directory.

    The date comes from the EXIF DateTimeOriginal tag when present, else the
    file's mtime. An identical same-named file at the destination causes the
    source to be deleted; a differing same-named file is moved under a
    numeric '_N' suffix.
    """
    src_file_path = os.path.abspath(file)
    logger.debug("Handling file {} ...".format(src_file_path))
    exifReader = ExifFileReader(src_file_path)
    exifDate = exifReader.readExifDate()
    # by default use the modification time; prefer the EXIF date if present
    mtime = os.stat(file).st_mtime
    date = exifDate or datetime.datetime.fromtimestamp(mtime).strftime('%Y-%m-%d')
    if date:
        dst_dir_path = create_directories(args, date)
        dst_file_path = dst_dir_path + os.path.sep + ntpath.basename(file)
        if os.path.isfile(dst_file_path):
            # files with the same name exist
            # check their hashes, whether they are the same
            currfileHash = md5(src_file_path)
            newfileHash = md5(dst_file_path)
            if currfileHash == newfileHash:
                logger.debug("file {} already exists, deleted from source {}".format(dst_file_path, src_file_path))
                stats.report(dst_dir_path, 'existed')
                stats.report(src_file_path, 'deleted')
                os.remove(src_file_path)
            else:
                logger.debug("file {} already exists, but the source file {} itself is different, will be moved with different name".format(dst_file_path, src_file_path))
                i = 1
                new_dst_file_name = dst_file_path + '_' + str(i)
                while os.path.isfile(new_dst_file_name):
                    # Bug fix: the message previously had a '{}' placeholder
                    # but no .format() argument.
                    logger.debug("file {} already exists, incrementing 1 to suffix".format(new_dst_file_name))
                    i = i + 1
                    new_dst_file_name = dst_file_path + '_' + str(i)
                shutil.move(src_file_path, new_dst_file_name)
                stats.report(dst_dir_path, 'renamed')
        else:
            shutil.move(src_file_path, dst_file_path)
            logger.debug("file {} moved to {}".format(src_file_path, dst_file_path))
            stats.report(dst_dir_path, 'moved')
def handle_dir(args, stats, dir):
    """Process every file under *dir* recursively, then prune empty folders."""
    abs_dir = os.path.abspath(dir)
    start = time.time()
    logger.info("Processing folder {} ...".format(abs_dir))
    # Bug fix: os.walk already traverses the whole tree, yet the old code
    # additionally called handle_dir() on every subdirectory, re-walking
    # nested trees multiple times. A single walk visits each file once.
    for root, subdirs, files in os.walk(abs_dir):
        logger.debug(subdirs)
        logger.debug(files)
        for f in files:
            handle_file(args, stats, os.path.join(root, f))
    # remove dirs left empty after their files were moved away
    removeEmptyFolders(abs_dir)
    end = time.time()
    tookSec = end - start
    logger.info("Done with folder {} ({} sec).\n".format(abs_dir, tookSec))
def removeEmptyFolders(path, removeRoot=True):
    """Recursively delete empty folders below (and optionally at) *path*."""
    if not os.path.isdir(path):
        return
    # Recurse into subfolders first so nested empty dirs collapse upwards.
    for entry in os.listdir(path):
        fullpath = os.path.join(path, entry)
        if os.path.isdir(fullpath):
            removeEmptyFolders(fullpath)
    # if folder is (now) empty, delete it
    if not os.listdir(path) and removeRoot:
        logger.info("Removing empty folder: {}".format(path))
        os.rmdir(path)
def main(argv):
    """Entry point: validate args, process the source tree, log summary stats."""
    arguments = InputArguments(argv)
    run_stats = Stats()
    handle_dir(arguments, run_stats, os.path.abspath(arguments.dirname))
    logger.info(run_stats)
def md5(fname):
    """Hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
# Script entry point: expects <source dir> <destination dir> on the CLI.
if __name__ == "__main__":
    main(sys.argv)
| |
from typing import Optional, Union, Dict
import os
import shutil
import os.path as osp
import torch
import numpy as np
import pandas as pd
from ogb.utils.url import decide_download, download_url, extract_zip, makedirs
class WikiKG90Mv2Dataset(object):
    '''
    Loader for the OGB-LSC WikiKG90M-v2 knowledge-graph dataset.

    Downloads (after interactive confirmation) the preprocessed data into
    <root>/wikikg90m-v2 and exposes training triplets, text features and
    the validation/test query splits.
    '''
    def __init__(self, root: str = 'dataset'):
        self.original_root = root
        self.folder = osp.join(root, 'wikikg90m-v2')
        self.version = 1
        # Old url hosted at Stanford
        # md5sum: bfd6257134b7eb59e2edc0a4af21faa8
        # self.url = 'http://ogb-data.stanford.edu/data/lsc/wikikg90m-v2.zip'
        # New url hosted by DGL team at AWS--much faster to download
        self.url = 'https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/wikikg90m-v2.zip'
        self.processed_dir = osp.join(self.folder, 'processed')
        # Drop a leftover 'mapping' directory from earlier releases.
        if osp.isdir(osp.join(self.folder, 'mapping')):
            shutil.rmtree(osp.join(self.folder, 'mapping'))
        # An existing folder without the RELEASE marker is stale: offer to
        # delete and re-download interactively.
        if osp.isdir(self.folder) and (not osp.exists(osp.join(self.folder, f'RELEASE_v{self.version}.txt'))):
            print('WikiKG90M-v2 dataset has been updated.')
            if input('Will you update the dataset now? (y/N)\n') == 'y':
                shutil.rmtree(osp.join(self.folder))
        self.download()
        self.__meta__ = torch.load(osp.join(self.folder, 'meta.pt'))
        # training triplet
        path = osp.join(self.processed_dir, 'train_hrt.npy')
        self._train_hrt = np.load(path)
        # node/edge features (lazily loaded by the properties below)
        self._entity_feat = None
        self._all_entity_feat = None
        self._relation_feat = None
        # Validation
        self._valid_dict = None
        # Test
        self._test_dev_dict = None
        self._test_challenge_dict = None

    def download(self):
        # Download and unzip only when the target folder does not exist yet;
        # decide_download prompts the user (the archive is very large).
        if not osp.exists(self.folder):
            if decide_download(self.url):
                path = download_url(self.url, self.original_root)
                extract_zip(path, self.original_root)
                os.unlink(path)
            else:
                print('Stop download.')
                exit(-1)

    @property
    def num_entities(self) -> int:
        return self.__meta__['num_entities']

    @property
    def num_relations(self) -> int:
        return self.__meta__['num_relations']

    @property
    def num_feat_dims(self) -> int:
        '''
            Dimensionality of relation and entity features obtained by paraphrase-mpnet-base-v2
        '''
        return 768

    @property
    def entity_feat(self) -> np.ndarray:
        '''
            Entity feature
            - np.ndarray of shape (num_entities, num_feat_dims)
              i-th row stores the feature of i-th entity
              * Loading everything into memory at once
              * saved in np.float16
        '''
        # mmap_mode='r' keeps the (huge) array on disk; rows are paged in
        # on access.
        if self._entity_feat is None:
            path = osp.join(self.processed_dir, 'entity_feat.npy')
            self._entity_feat = np.load(path, mmap_mode='r')
        return self._entity_feat

    @property
    def all_entity_feat(self) -> np.ndarray:
        # Fully materialized variant of entity_feat (loads everything in RAM).
        if self._all_entity_feat is None:
            path = osp.join(self.processed_dir, 'entity_feat.npy')
            self._all_entity_feat = np.load(path)
        return self._all_entity_feat

    @property
    def relation_feat(self) -> np.ndarray:
        '''
            Relation feature
            - np.ndarray of shape (num_relations, num_feat_dims)
              i-th row stores the feature of i-th relation
              * saved in np.float16
        '''
        if self._relation_feat is None:
            path = osp.join(self.processed_dir, 'relation_feat.npy')
            self._relation_feat = np.load(path)
        return self._relation_feat

    @property
    def all_relation_feat(self) -> np.ndarray:
        '''
            For completeness.
            #relations is small, so everything can be loaded into CPU memory.
        '''
        return self.relation_feat

    @property
    def train_hrt(self) -> np.ndarray:
        '''
            Training triplets (h, r, t)
            - np.ndarray of shape (num_triplets, 3)
            - i-th row corresponds to the i-th triplet (h, r, t)
        '''
        return self._train_hrt

    @property
    def valid_dict(self) -> Dict[str, Dict[str, np.ndarray]]:
        '''
            - h,r->t: Given head and relation, predict target entities
                - hr: np.ndarray of shape (num_validation_triplets, 2)
                      i-th row stores i-th (h,r)
                - t: np.ndarray of shape (num_validation_triplets,)
                     i-th row stores i-th index for tail entities
        '''
        # Lazily loaded and cached on first access.
        if self._valid_dict is None:
            self._valid_dict = {}
            # h, r -> t
            self._valid_dict['h,r->t'] = {}
            self._valid_dict['h,r->t']['hr'] = np.load(osp.join(self.processed_dir, 'val_hr.npy'))
            self._valid_dict['h,r->t']['t'] = np.load(osp.join(self.processed_dir, 'val_t.npy'))
        return self._valid_dict

    def test_dict(self, mode: str) -> Dict[str, Dict[str, np.ndarray]]:
        '''
            - h,r->t: Given head and relation, predict target entities
                - hr: np.ndarray of shape (num_test_triplets, 2)
                      i-th row stores i-th (h,r)
        '''
        # Test splits carry only the queries; the true tails are withheld.
        assert mode in ['test-dev', 'test-challenge']
        if mode == 'test-dev':
            if self._test_dev_dict is None:
                self._test_dev_dict = {}
                # h, r -> t
                self._test_dev_dict['h,r->t'] = {}
                self._test_dev_dict['h,r->t']['hr'] = np.load(osp.join(self.processed_dir, 'test-dev_hr.npy'))
            return self._test_dev_dict
        elif mode == 'test-challenge':
            if self._test_challenge_dict is None:
                self._test_challenge_dict = {}
                # h, r -> t
                self._test_challenge_dict['h,r->t'] = {}
                self._test_challenge_dict['h,r->t']['hr'] = np.load(osp.join(self.processed_dir, 'test-challenge_hr.npy'))
            return self._test_challenge_dict

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}()'
class WikiKG90Mv2Evaluator:
    """Computes MRR@10 for the h,r->t task and writes test submissions."""

    def eval(self, input_dict):
        '''
            Format of input_dict:
            - 'h,r->t'
                - t_pred_top10: np.ndarray of shape (num_eval_triplets, 10)
                    (i,j) represents the j-th prediction for i-th triplet
                    Only top10 prediction is taken into account
                - t: np.ndarray of shape (num_eval_triplets,)
        '''
        assert 'h,r->t' in input_dict
        task = input_dict['h,r->t']
        assert ('t_pred_top10' in task) and ('t' in task)
        t_pred_top10 = task['t_pred_top10']
        t = task['t']
        # Normalize numpy inputs to torch tensors.
        if not isinstance(t_pred_top10, torch.Tensor):
            t_pred_top10 = torch.from_numpy(t_pred_top10)
        if not isinstance(t, torch.Tensor):
            t = torch.from_numpy(t)
        assert t_pred_top10.shape[1] == 10 and len(t_pred_top10) == len(t)
        # verifying that there is no duplicated prediction for each triplet
        # (negative entries are padding and are ignored)
        duplicated = any(
            len(torch.unique(row[row >= 0])) != len(row[row >= 0])
            for row in t_pred_top10)
        if duplicated:
            print('Found duplicated tail prediction for some triplets! MRR is automatically set to 0.')
            mrr = 0
        else:
            mrr = self._calculate_mrr(t.to(t_pred_top10.device), t_pred_top10)
        return {'mrr': mrr}

    def _calculate_mrr(self, t, t_pred_top10):
        '''
            - t: shape (num_eval_triplets, )
            - t_pred_top10: shape (num_eval_triplets, 10)
        '''
        hits = torch.nonzero(t.view(-1, 1) == t_pred_top10, as_tuple=False)
        # Reciprocal rank; triplets whose target is absent from the top 10
        # keep a reciprocal rank of 0.
        rr = torch.zeros(len(t)).to(hits.device)
        rr[hits[:, 0]] = 1. / (hits[:, 1].float() + 1.)
        # mean reciprocal rank
        return float(rr.mean().item())

    def save_test_submission(self, input_dict: Dict, dir_path: str, mode: str):
        """Validate shape/uniqueness and write the compressed submission file."""
        assert 'h,r->t' in input_dict
        assert 't_pred_top10' in input_dict['h,r->t']
        assert mode in ['test-dev', 'test-challenge']
        t_pred_top10 = input_dict['h,r->t']['t_pred_top10']
        for row in t_pred_top10:
            assert len(pd.unique(row)) == len(row), 'Found duplicated tail prediction for some triplets!'
        if mode == 'test-dev':
            assert t_pred_top10.shape == (15000, 10)
            filename = osp.join(dir_path, 't_pred_wikikg90m-v2_test-dev')
        elif mode == 'test-challenge':
            assert t_pred_top10.shape == (10000, 10)
            filename = osp.join(dir_path, 't_pred_wikikg90m-v2_test-challenge')
        makedirs(dir_path)
        if isinstance(t_pred_top10, torch.Tensor):
            t_pred_top10 = t_pred_top10.cpu().numpy()
        t_pred_top10 = t_pred_top10.astype(np.int32)
        np.savez_compressed(filename, t_pred_top10=t_pred_top10)
# Ad-hoc smoke test / demo with random predictions.
# NOTE(review): the dataset root below is a hard-coded cluster path and the
# constructor may trigger a very large download — not meant for CI.
if __name__ == '__main__':
    dataset = WikiKG90Mv2Dataset(root = '/dfs/user/weihuahu/ogb-lsc/datasets/wikikg90m-v2/')
    print(dataset)
    print(dataset.num_entities)
    print(dataset.entity_feat)
    print(dataset.entity_feat.shape)
    print(dataset.num_relations)
    print(dataset.relation_feat)
    print(dataset.all_relation_feat)
    print(dataset.relation_feat.shape)
    print(dataset.train_hrt)
    print(dataset.valid_dict)
    print(dataset.test_dict(mode = 'test-dev'))
    print(dataset.test_dict(mode = 'test-challenge'))
    evaluator = WikiKG90Mv2Evaluator()
    # Plant each true tail at a random rank within the top-10 so the MRR
    # computed by the evaluator should match the analytic mean(1/(rank+1)).
    t = np.random.randint(10000000, size = (10000,))
    t_pred_top10 = np.random.randint(10000000, size = (10000,10))
    rank = np.random.randint(10, size = (10000,))
    t_pred_top10[np.arange(len(rank)), rank] = t
    print(evaluator.eval({'h,r->t': {'t': t, 't_pred_top10': t_pred_top10}}))
    print(np.mean(1./(rank + 1)))
    # Exercise the submission writer with a correctly-shaped random matrix.
    t_pred_top10 = np.random.randint(10000000, size = (15000,10))
    evaluator.save_test_submission(
        input_dict = {'h,r->t': {'t_pred_top10': t_pred_top10}},
        dir_path = 'results',
        mode = 'test-dev',
    )
| |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo.utils import encodeutils
from oslo.utils import strutils
import prettytable
import six
from six import moves
from magnumclient.openstack.common._i18n import _
class MissingArgs(Exception):
    """Supplied arguments are not sufficient for calling a function."""
    def __init__(self, missing):
        self.missing = missing
        joined = ", ".join(missing)
        super(MissingArgs, self).__init__(_("Missing arguments: %s") % joined)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: a

    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: when any required argument is absent
    """
    # NOTE(review): inspect.getargspec was removed in Python 3.11
    # (getfullargspec is the successor) — confirm the supported runtimes.
    argspec = inspect.getargspec(fn)
    # Arguments with defaults are never "required".
    num_defaults = len(argspec.defaults or [])
    required_args = argspec.args[:len(argspec.args) - num_defaults]
    def isbound(method):
        return getattr(method, '__self__', None) is not None
    # Bound methods receive 'self' implicitly; don't count it as required.
    if isbound(fn):
        required_args.pop(0)
    # Required args not supplied by keyword, minus those covered positionally.
    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def arg(*args, **kwargs):
    """Decorator for CLI args.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def attach(func):
        # Record the argparse specification on the decorated function.
        add_arg(func, *args, **kwargs)
        return func

    return attach
def env(*args, **kwargs):
    """Returns the first environment variable set.

    If all are empty, defaults to '' or keyword arg `default`.
    """
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function."""
    arguments = getattr(func, 'arguments', None)
    if arguments is None:
        arguments = func.arguments = []
    # NOTE(sirp): avoid dups that can occur when the module is shared across
    # tests.
    if (args, kwargs) not in arguments:
        # Decorators apply bottom-up; prepending keeps positional options
        # in their declared order.
        arguments.insert(0, (args, kwargs))
def unauthenticated(func):
    """Adds 'unauthenticated' attribute to decorated function.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Checks if the function does not require authentication.

    Mark such functions with the `@unauthenticated` decorator.

    :returns: bool
    """
    try:
        return func.unauthenticated
    except AttributeError:
        return False
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list of objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows,
        or None to leave rows unsorted
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if field_labels and fields have different lengths
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # BUG FIX: the substitution dict was previously passed as a second
        # positional argument to ValueError instead of being %-interpolated
        # into the message, so the %(labels)s/%(fields)s placeholders were
        # never filled in.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                # Caller-supplied formatter wins over attribute lookup.
                row.append(formatters[field](o))
            else:
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)
    # safe_encode returns bytes; decode for text output on Python 3.
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a two-column table.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    """
    table = prettytable.PrettyTable([dict_property, 'Value'])
    table.align = 'l'
    for key, value in six.iteritems(dct):
        # Nested dicts are stringified so the wrap/length logic below works.
        if isinstance(value, dict):
            value = six.text_type(value)
        if wrap > 0:
            value = textwrap.fill(six.text_type(value), wrap)
        # A literal '\n' marker (e.g. a fault with a stacktrace) is expanded
        # into multiple table rows, with the key shown only on the first row.
        if value and isinstance(value, six.string_types) and r'\n' in value:
            label = key
            for piece in value.strip().split(r'\n'):
                table.add_row([label, piece])
                label = ''
        else:
            table.add_row([key, value])
    rendered = encodeutils.safe_encode(table.get_string())
    if six.PY3:
        rendered = rendered.decode()
    print(rendered)
def get_password(max_password_prompts=3):
    """Read a password from the controlling TTY.

    Prompts up to `max_password_prompts` times; when OS_VERIFY_PASSWORD is
    truthy, the password must be typed twice identically. Returns None when
    stdin is not a TTY or on Ctrl-D.
    """
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    if not (hasattr(sys.stdin, "isatty") and sys.stdin.isatty()):
        return None
    password = None
    try:
        for _attempt in moves.range(max_password_prompts):
            first = getpass.getpass("OS Password: ")
            second = getpass.getpass("Please verify: ") if verify else first
            if first and first == second:
                password = first
                break
    except EOFError:
        # Ctrl-D: give up and fall through with no password.
        pass
    return password
def service_type(stype):
    """Decorator that tags a function with a 'service_type' attribute.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def _tag(func):
        func.service_type = stype
        return func
    return _tag
def get_service_type(f):
    """Return the service type attached to `f`, or None if untagged."""
    stype = getattr(f, 'service_type', None)
    return stype
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of quoted values."""
    quoted = ("'%s'" % item for item in l)
    return ', '.join(quoted)
def exit(msg=''):
    """Print `msg` to stderr (when non-empty) and terminate with status 1."""
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)
| |
import datetime
import re
from typing import Any, Dict, List, Mapping
from unittest import mock
import orjson
from django.conf import settings
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_change_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm, tornado_redirected_to_list
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
ScheduledEmail,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_email,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
    """Tests for realm-level actions (rename, deactivate, reactivate,
    scrub, plan changes) and for the settings exposed via the
    PATCH /json/realm endpoint."""
    def assert_user_profile_cache_gets_new_name(self, user_profile: UserProfile,
                                                new_realm_name: str) -> None:
        # Helper: the realm name reached through the (cached) user profile
        # must match the newly set name.
        self.assertEqual(user_profile.realm.name, new_realm_name)
    def test_realm_creation_ensures_internal_realms(self) -> None:
        """do_create_realm on an uninitialized server creates the internal realm first."""
        with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
            with mock.patch("zerver.lib.actions.create_internal_realm") as mock_create_internal, \
                    self.assertLogs(level='INFO') as info_logs:
                do_create_realm("testrealm", "Test Realm")
            mock_create_internal.assert_called_once()
            self.assertEqual(info_logs.output, [
                'INFO:root:Server not yet initialized. Creating the internal realm first.'
            ])
    def test_do_set_realm_name_caching(self) -> None:
        """The main complicated thing about setting realm names is fighting the
        cache, and we start by populating the cache for Hamlet, and we end
        by checking the cache to ensure that the new value is there."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        new_name = 'Zed You Elle Eye Pea'
        do_set_realm_property(realm, 'name', new_name)
        self.assertEqual(get_realm(realm.string_id).name, new_name)
        self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)
    def test_update_realm_name_events(self) -> None:
        """Renaming a realm sends a realm/update event for 'name'."""
        realm = get_realm('zulip')
        new_name = 'Puliz'
        events: List[Mapping[str, Any]] = []
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'name', new_name)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='name',
            value=new_name,
        ))
    def test_update_realm_description_events(self) -> None:
        """Changing the description sends a realm/update event for 'description'."""
        realm = get_realm('zulip')
        new_description = 'zulip dev group'
        events: List[Mapping[str, Any]] = []
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, 'description', new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_update_realm_description(self) -> None:
        """PATCH /json/realm updates the description and emits the matching event."""
        self.login('iago')
        new_description = 'zulip dev group'
        data = dict(description=orjson.dumps(new_description).decode())
        events: List[Mapping[str, Any]] = []
        with tornado_redirected_to_list(events):
            result = self.client_patch('/json/realm', data)
            self.assert_json_success(result)
            realm = get_realm('zulip')
            self.assertEqual(realm.description, new_description)
        event = events[0]['event']
        self.assertEqual(event, dict(
            type='realm',
            op='update',
            property='description',
            value=new_description,
        ))
    def test_realm_description_length(self) -> None:
        """Over-long descriptions are rejected and not saved."""
        new_description = 'A' * 1001
        data = dict(description=orjson.dumps(new_description).decode())
        # create an admin user
        self.login('iago')
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization description is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.description, new_description)
    def test_realm_name_length(self) -> None:
        """Names beyond MAX_REALM_NAME_LENGTH are rejected and not saved."""
        new_name = 'A' * (Realm.MAX_REALM_NAME_LENGTH + 1)
        data = dict(name=orjson.dumps(new_name).decode())
        # create an admin user
        self.login('iago')
        result = self.client_patch('/json/realm', data)
        self.assert_json_error(result, 'Organization name is too long.')
        realm = get_realm('zulip')
        self.assertNotEqual(realm.name, new_name)
    def test_admin_restrictions_for_changing_realm_name(self) -> None:
        """Non-admins (othello) may not rename the realm."""
        new_name = 'Mice will play while the cat is away'
        self.login('othello')
        req = dict(name=orjson.dumps(new_name).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Must be an organization administrator')
    def test_unauthorized_name_change(self) -> None:
        """With name_changes_disabled, a normal user's rename silently no-ops,
        but realm admins can still change their own name."""
        data = {'full_name': 'Sir Hamlet'}
        user_profile = self.example_user('hamlet')
        self.login_user(user_profile)
        do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assertEqual(result.status_code, 200)
        # Since the setting fails silently, no message is returned
        self.assert_in_response("", result)
        # Realm admins can change their name even setting is disabled.
        data = {'full_name': 'New Iago'}
        self.login('iago')
        url = '/json/settings'
        result = self.client_patch(url, data)
        self.assert_in_success_response(['"full_name":"New Iago"'], result)
    def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
        """The main complicated thing about deactivating realm names is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated.  You can make this test fail
        by disabling cache.flush_realm()."""
        self.example_user('hamlet')
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        user = self.example_user('hamlet')
        self.assertTrue(user.realm.deactivated)
    def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
        """The main complicated thing about changing realm subdomains is
        updating the cache, and we start by populating the cache for
        Hamlet, and we end by checking the cache to ensure that his
        realm appears to be deactivated.  You can make this test fail
        by disabling cache.flush_realm()."""
        user = get_user_profile_by_email('hamlet@zulip.com')
        realm = get_realm('zulip')
        do_change_realm_subdomain(realm, "newzulip")
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.realm.string_id, "newzulip")
        # This doesn't use a cache right now, but may later.
        with self.assertRaises(Realm.DoesNotExist):
            get_realm("zulip")
    def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
        """Deactivating a realm deletes its pending ScheduledEmail rows."""
        user = self.example_user('hamlet')
        send_future_email('zerver/emails/followup_day1', user.realm,
                          to_user_ids=[user.id], delay=datetime.timedelta(hours=1))
        self.assertEqual(ScheduledEmail.objects.count(), 1)
        do_deactivate_realm(user.realm)
        self.assertEqual(ScheduledEmail.objects.count(), 0)
    def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
        """Saving a new description invalidates both the rendered and the
        plain-text cached descriptions."""
        realm = get_realm('zulip')
        rendered_description = get_realm_rendered_description(realm)
        text_description = get_realm_text_description(realm)
        realm.description = 'New Description'
        realm.save(update_fields=['description'])
        new_rendered_description = get_realm_rendered_description(realm)
        self.assertNotEqual(rendered_description, new_rendered_description)
        self.assertIn(realm.description, new_rendered_description)
        new_text_description = get_realm_text_description(realm)
        self.assertNotEqual(text_description, new_text_description)
        self.assertEqual(realm.description, new_text_description)
    def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
        """Ensure early exit is working in realm deactivation"""
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
    def test_realm_reactivation_link(self) -> None:
        """Visiting a valid reactivation confirmation link reactivates the realm."""
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_realm_reactivation_confirmation_object(self) -> None:
        """The Confirmation row created for reactivation points at the realm."""
        realm = get_realm('zulip')
        do_deactivate_realm(realm)
        self.assertTrue(realm.deactivated)
        create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
        confirmation = Confirmation.objects.last()
        self.assertEqual(confirmation.content_object, realm)
        self.assertEqual(confirmation.realm, realm)
    def test_do_send_realm_reactivation_email(self) -> None:
        """The reactivation email is sent and its embedded link reactivates the realm."""
        realm = get_realm('zulip')
        do_send_realm_reactivation_email(realm)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
        self.assertRegex(
            outbox[0].from_email,
            fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
        )
        self.assertIn('Reactivate your Zulip organization', outbox[0].subject)
        self.assertIn('Dear former administrators', outbox[0].body)
        admins = realm.get_human_admin_users()
        confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(['Your organization has been successfully reactivated'], response)
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_realm_reactivation_with_random_link(self) -> None:
        """An invalid/expired reactivation link renders the error page."""
        random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
        response = self.client_get(random_link)
        self.assert_in_success_response(['The organization reactivation link has expired or is not valid.'], response)
    def test_change_notifications_stream(self) -> None:
        """notifications_stream_id accepts -1 (disable), a valid id, and
        rejects an id for a nonexistent stream."""
        # We need an admin user.
        self.login('iago')
        disabled_notif_stream_id = -1
        req = dict(notifications_stream_id = orjson.dumps(disabled_notif_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.notifications_stream, None)
        new_notif_stream_id = 4
        req = dict(notifications_stream_id = orjson.dumps(new_notif_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        assert realm.notifications_stream is not None
        self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
        invalid_notif_stream_id = 1234
        req = dict(notifications_stream_id = orjson.dumps(invalid_notif_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        assert realm.notifications_stream is not None
        self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
    def test_get_default_notifications_stream(self) -> None:
        """get_notifications_stream() returns the stream, or None once it
        has been deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.notifications_stream_id = verona.id
        realm.save(update_fields=["notifications_stream"])
        notifications_stream = realm.get_notifications_stream()
        assert notifications_stream is not None
        self.assertEqual(notifications_stream.id, verona.id)
        do_deactivate_stream(notifications_stream)
        self.assertIsNone(realm.get_notifications_stream())
    def test_change_signup_notifications_stream(self) -> None:
        """Same coverage as test_change_notifications_stream, for the
        signup notifications stream."""
        # We need an admin user.
        self.login('iago')
        disabled_signup_notifications_stream_id = -1
        req = dict(signup_notifications_stream_id = orjson.dumps(disabled_signup_notifications_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.signup_notifications_stream, None)
        new_signup_notifications_stream_id = 4
        req = dict(signup_notifications_stream_id = orjson.dumps(new_signup_notifications_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        assert realm.signup_notifications_stream is not None
        self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
        invalid_signup_notifications_stream_id = 1234
        req = dict(signup_notifications_stream_id = orjson.dumps(invalid_signup_notifications_stream_id).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid stream id')
        realm = get_realm('zulip')
        assert realm.signup_notifications_stream is not None
        self.assertNotEqual(realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id)
    def test_get_default_signup_notifications_stream(self) -> None:
        """get_signup_notifications_stream() returns the stream, or None once
        it has been deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.signup_notifications_stream = verona
        realm.save(update_fields=["signup_notifications_stream"])
        signup_notifications_stream = realm.get_signup_notifications_stream()
        assert signup_notifications_stream is not None
        self.assertEqual(signup_notifications_stream, verona)
        do_deactivate_stream(signup_notifications_stream)
        self.assertIsNone(realm.get_signup_notifications_stream())
    def test_change_realm_default_language(self) -> None:
        """default_language accepts a supported language code and rejects
        unknown codes without saving them."""
        new_lang = "de"
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, new_lang)
        # we need an admin user.
        self.login('iago')
        req = dict(default_language=orjson.dumps(new_lang).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when invalid languages are passed
        # as the default realm language, correct validation error is
        # raised and the invalid language is not saved in db
        invalid_lang = "invalid_lang"
        req = dict(default_language=orjson.dumps(invalid_lang).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
        realm = get_realm('zulip')
        self.assertNotEqual(realm.default_language, invalid_lang)
    def test_deactivate_realm_by_owner(self) -> None:
        """An organization owner can deactivate the realm via the API."""
        self.login('desdemona')
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertTrue(realm.deactivated)
    def test_deactivate_realm_by_non_owner(self) -> None:
        """A mere admin (iago) cannot deactivate the realm."""
        self.login('iago')
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
        result = self.client_post('/json/realm/deactivate')
        self.assert_json_error(result, "Must be an organization owner")
        realm = get_realm('zulip')
        self.assertFalse(realm.deactivated)
    def test_change_bot_creation_policy(self) -> None:
        """bot_creation_policy accepts known values and rejects others."""
        # We need an admin user.
        self.login('iago')
        req = dict(bot_creation_policy = orjson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_add_bot_permission = 4
        req = dict(bot_creation_policy = orjson.dumps(invalid_add_bot_permission).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid bot_creation_policy')
    def test_change_email_address_visibility(self) -> None:
        """email_address_visibility controls who can see delivery_email:
        ADMINS exposes it only to admins, NOBODY hides it from everyone."""
        # We need an admin user.
        user_profile = self.example_user("iago")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        self.login_user(user_profile)
        invalid_value = 12
        req = dict(email_address_visibility = orjson.dumps(invalid_value).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid email_address_visibility')
        reset_emails_in_zulip_realm()
        realm = get_realm("zulip")
        req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        edited_user_profile = get_user_profile_by_id(user_profile.id)
        self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver")
        # Check normal user cannot access email
        result = self.api_get(cordelia, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()['user']['email'],
                         f'user{hamlet.id}@zulip.testserver')
        self.assertEqual(result.json()['user'].get('delivery_email'), None)
        # Check administrator gets delivery_email with EMAIL_ADDRESS_VISIBILITY_ADMINS
        result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()['user']['email'],
                         f'user{hamlet.id}@zulip.testserver')
        self.assertEqual(result.json()['user'].get('delivery_email'),
                         hamlet.delivery_email)
        req = dict(email_address_visibility = orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY)
        edited_user_profile = get_user_profile_by_id(user_profile.id)
        self.assertEqual(edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver")
        # Check even administrator doesn't get delivery_email with
        # EMAIL_ADDRESS_VISIBILITY_NOBODY
        result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()['user']['email'],
                         f'user{hamlet.id}@zulip.testserver')
        self.assertEqual(result.json()['user'].get('delivery_email'), None)
    def test_change_stream_creation_policy(self) -> None:
        """create_stream_policy accepts known values and rejects others."""
        # We need an admin user.
        self.login('iago')
        req = dict(create_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_value = 10
        req = dict(create_stream_policy = orjson.dumps(invalid_value).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid create_stream_policy')
    def test_change_invite_to_stream_policy(self) -> None:
        """invite_to_stream_policy accepts known values and rejects others."""
        # We need an admin user.
        self.login('iago')
        req = dict(invite_to_stream_policy = orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_value = 10
        req = dict(invite_to_stream_policy = orjson.dumps(invalid_value).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid invite_to_stream_policy')
    def test_user_group_edit_policy(self) -> None:
        """user_group_edit_policy accepts known values and rejects others."""
        # We need an admin user.
        self.login('iago')
        req = dict(user_group_edit_policy = orjson.dumps(Realm.USER_GROUP_EDIT_POLICY_ADMINS).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_value = 10
        req = dict(user_group_edit_policy = orjson.dumps(invalid_value).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid user_group_edit_policy')
    def test_private_message_policy(self) -> None:
        """private_message_policy accepts known values and rejects others."""
        # We need an admin user.
        self.login('iago')
        req = dict(private_message_policy = orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        invalid_value = 10
        req = dict(private_message_policy = orjson.dumps(invalid_value).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, 'Invalid private_message_policy')
    def test_invalid_integer_attribute_values(self) -> None:
        """Every integer realm property must have an invalid-value test case;
        fail loudly if a new property was added without one."""
        integer_values = [key for key, value in Realm.property_types.items() if value is int]
        invalid_values = dict(
            bot_creation_policy=10,
            create_stream_policy=10,
            invite_to_stream_policy=10,
            email_address_visibility=10,
            message_retention_days=10,
            video_chat_provider=10,
            waiting_period_threshold=-10,
            digest_weekday=10,
            user_group_edit_policy=10,
            private_message_policy=10,
            message_content_delete_limit_seconds=-10,
        )
        # We need an admin user.
        self.login('iago')
        for name in integer_values:
            invalid_value = invalid_values.get(name)
            if invalid_value is None:
                raise AssertionError(f'No test created for {name}')
            self.do_test_invalid_integer_attribute_value(name, invalid_value)
    def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
        # Helper: PATCHing an invalid integer value must produce one of the
        # known error message formats for that property.
        possible_messages = {
            f"Invalid {val_name}",
            f"Bad value for '{val_name}'",
            f"Bad value for '{val_name}': {invalid_val}",
            f"Invalid {val_name} {invalid_val}",
        }
        req = {val_name: invalid_val}
        result = self.client_patch('/json/realm', req)
        msg = self.get_json_error(result)
        self.assertTrue(msg in possible_messages)
    def test_change_video_chat_provider(self) -> None:
        """video_chat_provider can be switched among the known providers and
        rejects unknown ids."""
        self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
        self.login('iago')
        invalid_video_chat_provider_value = 10
        req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result,
                               ("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value))
        req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['disabled']['id']).decode()}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider,
                         Realm.VIDEO_CHAT_PROVIDERS['disabled']['id'])
        req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode()}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'])
        req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id']).decode()}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        self.assertEqual(get_realm('zulip').video_chat_provider,
                         Realm.VIDEO_CHAT_PROVIDERS['big_blue_button']['id'])
        req = {"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['zoom']['id']).decode()}
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
    def test_initial_plan_type(self) -> None:
        """New realms start as LIMITED when billing is enabled, otherwise
        SELF_HOSTED, with the matching invite/visibility/upload limits."""
        with self.settings(BILLING_ENABLED=True):
            self.assertEqual(do_create_realm('hosted', 'hosted').plan_type, Realm.LIMITED)
            self.assertEqual(get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
            self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
        with self.settings(BILLING_ENABLED=False):
            self.assertEqual(do_create_realm('onpremise', 'onpremise').plan_type, Realm.SELF_HOSTED)
            self.assertEqual(get_realm('onpremise').max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
            self.assertEqual(get_realm('onpremise').message_visibility_limit, None)
            self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
    def test_change_plan_type(self) -> None:
        """do_change_plan_type updates max_invites, message_visibility_limit,
        and upload_quota_gb for each plan transition."""
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, None)
        do_change_plan_type(realm, Realm.STANDARD)
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
        do_change_plan_type(realm, Realm.LIMITED)
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.LIMITED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
        do_change_plan_type(realm, Realm.STANDARD_FREE)
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
        do_change_plan_type(realm, Realm.LIMITED)
        do_change_plan_type(realm, Realm.SELF_HOSTED)
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, None)
    def test_message_retention_days(self) -> None:
        """message_retention_days is owner-only, validates its value, and is
        gated on the realm's plan type (Standard required on LIMITED)."""
        self.login('iago')
        realm = get_realm('zulip')
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Must be an organization owner")
        self.login('desdemona')
        req = dict(message_retention_days=orjson.dumps(0).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
        req = dict(message_retention_days=orjson.dumps(-10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(
            result, "Bad value for 'message_retention_days': -10")
        req = dict(message_retention_days=orjson.dumps('invalid').decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
        req = dict(message_retention_days=orjson.dumps(-1).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
        req = dict(message_retention_days=orjson.dumps('forever').decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
        do_change_plan_type(realm, Realm.LIMITED)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_error(
            result, "Available on Zulip Standard. Upgrade to access.")
        do_change_plan_type(realm, Realm.STANDARD)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch('/json/realm', req)
        self.assert_json_success(result)
class RealmAPITest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.login('desdemona')
def set_up_db(self, attr: str, value: Any) -> None:
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save(update_fields=[attr])
def update_with_api(self, name: str, value: int) -> Realm:
result = self.client_patch('/json/realm', {name: orjson.dumps(value).decode()})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
result = self.client_patch('/json/realm', data_dict)
self.assert_json_success(result)
return get_realm('zulip')
def do_test_realm_update_api(self, name: str) -> None:
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests: List[bool] = [False, True]
test_values: Dict[str, Any] = dict(
default_language=['de', 'en'],
default_code_block_language=['javascript', ''],
description=['Realm description', 'New description'],
digest_weekday=[0, 1, 2],
message_retention_days=[10, 20],
name=['Zulip', 'New Name'],
waiting_period_threshold=[10, 20],
create_stream_policy=[Realm.POLICY_ADMINS_ONLY,
Realm.POLICY_MEMBERS_ONLY,
Realm.POLICY_FULL_MEMBERS_ONLY],
user_group_edit_policy=[Realm.USER_GROUP_EDIT_POLICY_ADMINS,
Realm.USER_GROUP_EDIT_POLICY_MEMBERS],
private_message_policy=[Realm.PRIVATE_MESSAGE_POLICY_UNLIMITED,
Realm.PRIVATE_MESSAGE_POLICY_DISABLED],
invite_to_stream_policy=[Realm.POLICY_ADMINS_ONLY,
Realm.POLICY_MEMBERS_ONLY,
Realm.POLICY_FULL_MEMBERS_ONLY],
bot_creation_policy=[1, 2],
email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY],
video_chat_provider=[
dict(
video_chat_provider=orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id']).decode(),
),
],
message_content_delete_limit_seconds=[1000, 1100, 1200]
)
vals = test_values.get(name)
if Realm.property_types[name] is bool:
vals = bool_tests
if vals is None:
raise AssertionError(f'No test created for {name}')
if name == 'video_chat_provider':
self.set_up_db(name, vals[0][name])
realm = self.update_with_api_multiple_value(vals[0])
self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
else:
self.set_up_db(name, vals[0])
realm = self.update_with_api(name, vals[1])
self.assertEqual(getattr(realm, name), vals[1])
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self) -> None:
for prop in Realm.property_types:
with self.subTest(property=prop):
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self) -> None:
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db('allow_message_editing', False)
self.set_up_db('message_content_edit_limit_seconds', 0)
self.set_up_db('allow_community_topic_editing', False)
realm = self.update_with_api('allow_message_editing', True)
realm = self.update_with_api('message_content_edit_limit_seconds', 100)
realm = self.update_with_api('allow_community_topic_editing', True)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('allow_message_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('message_content_edit_limit_seconds', 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.allow_community_topic_editing, True)
realm = self.update_with_api('allow_community_topic_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
self.assertEqual(realm.allow_community_topic_editing, False)
def test_update_realm_allow_message_deleting(self) -> None:
"""Tests updating the realm property 'allow_message_deleting'."""
self.set_up_db('allow_message_deleting', True)
self.set_up_db('message_content_delete_limit_seconds', 0)
realm = self.update_with_api('allow_message_deleting', False)
self.assertEqual(realm.allow_message_deleting, False)
self.assertEqual(realm.message_content_delete_limit_seconds, 0)
realm = self.update_with_api('allow_message_deleting', True)
realm = self.update_with_api('message_content_delete_limit_seconds', 100)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 100)
realm = self.update_with_api('message_content_delete_limit_seconds', 600)
self.assertEqual(realm.allow_message_deleting, True)
self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
    def test_scrub_realm(self) -> None:
        """do_scrub_realm wipes messages, attachments, custom profile fields
        and user PII for the scrubbed realm only; other realms are untouched."""
        zulip = get_realm("zulip")
        lear = get_realm("lear")
        iago = self.example_user("iago")
        othello = self.example_user("othello")
        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")
        create_stream_if_needed(lear, "Shakespeare")
        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")
        # Start from an empty message table so the counts below are exact.
        Message.objects.all().delete()
        UserMessage.objects.all().delete()
        # 5 messages per sender: 10 per realm.
        for i in range(5):
            self.send_stream_message(iago, "Scotland")
            self.send_stream_message(othello, "Scotland")
            self.send_stream_message(cordelia, "Shakespeare")
            self.send_stream_message(king, "Shakespeare")
        # Two attachments in each realm.
        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)
        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)
        CustomProfileField.objects.create(realm=lear)
        # Sanity-check the fixture data before scrubbing.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        # Silence the warning logging that do_scrub_realm emits.
        with mock.patch('logging.warning'):
            do_scrub_realm(zulip)
        # Zulip-realm data is gone; lear's counts are unchanged.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
        # Scrubbed users get randomized names/emails; lear users keep theirs.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
| |
from django.test import TestCase, LiveServerTestCase
from django.test import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from selenium import webdriver
import factory
import factory.django
from installer_config.models import EnvironmentProfile, UserChoice, Step
from selenium import webdriver
import os
# Address of the live test server the Selenium tests drive.
TEST_DOMAIN_NAME = "http://127.0.0.1:8081"
class UserFactory(factory.django.DjangoModelFactory):
    """Factory for django.contrib.auth User rows with unique usernames."""
    class Meta:
        model = User
    # username0, username1, ... — unique per created instance.
    username = factory.Sequence(lambda n: u'username%d' % n)
class EnvironmentProfileFactory(factory.django.DjangoModelFactory):
    """Factory for EnvironmentProfile rows, each owned by a fresh user."""
    class Meta:
        model = EnvironmentProfile
    # Unique title per instance; owner created via UserFactory.
    title = factory.Sequence(lambda n: u'EnvironmentalProfile%d' % n)
    user = factory.SubFactory(UserFactory)
class ChoiceFactory(factory.django.DjangoModelFactory):
    """Factory for UserChoice rows in the fixed 'core' category."""
    class Meta:
        model = UserChoice
    # Unique name/description per instance; category and priority are fixed.
    name = factory.Sequence(lambda n: u'name%d' % n)
    description = factory.Sequence(lambda n: u'description%d' % n)
    category = 'core'
    priority = 1
def login_user(driver, user, password):
    """Fill in and submit the site login form for *user*/*password*."""
    driver.get(TEST_DOMAIN_NAME + reverse('auth_login'))
    driver.find_element_by_id('id_username').send_keys(user)
    driver.find_element_by_id('id_password').send_keys(password)
    driver.find_element_by_tag_name('form').submit()
class UserProfileDetailTestCase(LiveServerTestCase):
    """Selenium tests for the login and profile-creation flow.

    Drives a real Firefox instance against the live test server.
    """

    def setUp(self):
        self.driver = webdriver.Firefox()
        # Bug fix: `super(...).setUp` was previously only referenced, never
        # called, so LiveServerTestCase's per-test setup was silently skipped.
        super(UserProfileDetailTestCase, self).setUp()
        self.user = User(username='user1')
        self.user.set_password('pass')
        self.user.is_active = True
        # Three selectable choices for the profile form.
        self.choice = []
        for i in range(3):
            self.choice.append(ChoiceFactory())
            self.choice[i].save()

    def tearDown(self):
        self.driver.refresh()
        self.driver.quit()
        super(UserProfileDetailTestCase, self).tearDown()

    def login_user(self, user, password):
        """Log in through the site's login form.

        Delegates to the module-level login_user() helper instead of
        duplicating the Selenium form-filling logic.
        """
        login_user(self.driver, user, password)

    def test_create_profile_all(self):
        """If all choices selected, all are in the created profile."""
        # .save() is here instead of setUp to save time
        self.user.save()
        self.login_user('user1', 'pass')
        self.driver.get(TEST_DOMAIN_NAME +
                        reverse('installer_config:CreateEnv'))
        self.assertIn("profileform", self.driver.page_source)
        # Fill out the form: description plus every choice checkbox.
        description = "Test description."
        field = self.driver.find_element_by_id('id_description')
        field.send_keys(description)
        for i in range(3):
            choice = "".join(['id_choices_', str(i)])
            field = self.driver.find_element_by_id(choice)
            field.click()
        form = self.driver.find_element_by_tag_name('form')
        form.submit()
        # The created profile shows up on the profile page.
        self.assertIn("userprofile", self.driver.page_source)
        self.assertIn(description, self.driver.page_source)
        # The profile detail page lists every selected choice.
        link = self.driver.find_elements_by_link_text('Test description.')
        link[0].click()
        for i in range(3):
            self.assertIn(self.choice[i].name, self.driver.page_source)
            self.assertIn(self.choice[i].description, self.driver.page_source)

    def test_create_profile_not_all(self):
        """If not all choices selected, only the right ones are produced."""
        # .save() is here instead of setUp to save time
        self.user.save()
        self.login_user('user1', 'pass')
        self.driver.get(TEST_DOMAIN_NAME +
                        reverse('installer_config:CreateEnv'))
        self.assertIn("profileform", self.driver.page_source)
        # Fill out the form, selecting only the first two choices.
        description = "Test description."
        field = self.driver.find_element_by_id('id_description')
        field.send_keys(description)
        for i in range(2):
            choice = "".join(['id_choices_', str(i)])
            field = self.driver.find_element_by_id(choice)
            field.click()
        form = self.driver.find_element_by_tag_name('form')
        form.submit()
        # The created profile shows up on the profile page.
        self.assertIn("userprofile", self.driver.page_source)
        self.assertIn(description, self.driver.page_source)
        # The detail page lists the selected choices...
        link = self.driver.find_elements_by_link_text('Test description.')
        link[0].click()
        for i in range(2):
            self.assertIn(self.choice[i].name, self.driver.page_source)
            self.assertIn(self.choice[i].description, self.driver.page_source)
        # ...and omits the unselected one.
        self.assertNotIn(self.choice[2].name, self.driver.page_source)
        self.assertNotIn(self.choice[2].description, self.driver.page_source)
# Will use these to write update and delete tests
# def test_update_profile(self):
# # .save() is here instead of setUp to save time
# self.user.save()
# login_user(self.driver, 'user1', 'pass')
# self.driver.get(TEST_DOMAIN_NAME +
# reverse('installer_config:UpdateEnv',
# kwargs={'pk': self.user.pk}))
# self.assertIn("profileform", self.driver.page_source)
# def test_delete_profile(self):
# # .save() is here instead of setUp to save time
# self.user.save()
# login_user(self.driver, 'user1', 'pass')
# self.driver.get(TEST_DOMAIN_NAME +
# reverse('installer_config:DeleteEnv',
# kwargs={'pk': self.user.pk}))
# self.assertIn("profileform", self.driver.page_source)
class DownloadFileFormationTest(TestCase):
    """Verify the generated install script downloaded for each profile.

    Each test checks that the script contains the code for exactly the
    choices attached to that profile (see set_data), and nothing else.
    """

    def setUp(self):
        self.user = User(username='n00b')
        self.user.set_password('...')
        self.user.is_active = True
        self.client = Client()

    def tearDown(self):
        pass

    def test_choice_presence_set1(self):
        # Verify the presence of the corresponding code in the downloaded
        # generated python script.
        self.user.save()
        inputs, profiles, choices = set_data(self.user)
        response = self.client.get(reverse('installer_config:download_profile', kwargs={'pk': profiles[0].pk}))
        # Bug fix: removed leftover debugger line `import pdb; pdb.set_choice()`
        # — set_choice is not a pdb API, so it raised AttributeError at runtime.
        # Verify that choices selected are present.
        self.assertIn('# For choice important thing', response.content)
        self.assertIn('# For choice your env', response.content)
        self.assertIn('# For choice get', response.content)
        # Check that the steps for choices selected and only choices selected
        # for a given environment are present in the generated python file.
        self.assertIn('# Download and run', response.content)
        self.assertIn('# Edit a file', response.content)
        self.assertIn('# Edit a profile', response.content)
        # Verify choices that don't belong are not present.
        self.assertNotIn('# Add a key, value pair', response.content)
        self.assertNotIn('"Executing " + \' \'.join(command_line)', response.content)
        self.assertNotIn('# Pip install, assuming', response.content)

    def test_choice_presence_set2(self):
        # Profile 2 should contain only the priority-2 choices and steps.
        self.user.save()
        inputs, profiles, choices = set_data(self.user)
        response = self.client.get(reverse('installer_config:download_profile', kwargs={'pk': profiles[1].pk}))
        self.assertIn('# For choice bash shenanigannns', response.content)
        self.assertIn('# For choice text editor', response.content)
        self.assertIn('# Add a key, value pair', response.content)
        self.assertIn('"Executing " + \' \'.join(command_line)', response.content)
        self.assertIn('# Pip install, assuming', response.content)
        self.assertNotIn('# Download and run', response.content)
        self.assertNotIn('# Edit a file', response.content)
        self.assertNotIn('# Edit a profile', response.content)

    def test_choice_presence_set3(self):
        # Profile 3's choices have no steps attached at all.
        self.user.save()
        inputs, profiles, choices = set_data(self.user)
        response = self.client.get(reverse('installer_config:download_profile', kwargs={'pk': profiles[2].pk}))
        self.assertIn('# For choice a pip package', response.content)
        self.assertIn('# For choice other', response.content)
        # Verify no steps for this set of choices.
        self.assertNotIn('# Download and run', response.content)
        self.assertNotIn('# Edit a file', response.content)
        self.assertNotIn('# Edit a profile', response.content)
        self.assertNotIn('# Add a key, value pair', response.content)
        self.assertNotIn('"Executing " + \' \'.join(command_line)', response.content)
        self.assertNotIn('# Pip install, assuming', response.content)
def set_data(user):
    """Populate UserChoice/Step/EnvironmentProfile fixtures for *user*.

    Creates seven choices in three priority buckets, attaches a fixed set
    of steps to the priority-1 and priority-2 choices, and builds three
    profiles — one per priority bucket.

    Returns (inputs, profiles, choices).
    """
    inputs = [
        ('important thing', 'core', 1),
        ('your env', 'env', 1),
        ('get', 'git', 1),
        ('bash shenanigannns', 'prompt', 2),
        ('text editor', 'subl', 2),
        ('a pip package', 'pkg', 3),
        ('other', 'other', 3),
    ]
    choices = []
    for name, category, priority in inputs:
        new_choice = UserChoice(name=name, category=category, priority=priority)
        new_choice.save()
        choices.append(new_choice)
    # Attach a fixed set of step types to each choice, by priority bucket.
    for choice in UserChoice.objects.filter(priority=1):
        for step_type in ('dl', 'edfile', 'edprof'):
            Step(step_type=step_type, user_choice=choice).save()
    for choice in UserChoice.objects.filter(priority=2):
        for step_type in ('env', 'exec', 'pip'):
            Step(step_type=step_type, user_choice=choice).save()
    profiles = [
        EnvironmentProfile(user=user, description='oneses'),
        EnvironmentProfile(user=user, description='twos'),
        EnvironmentProfile(user=user, description='threes'),
    ]
    # Profile N gets exactly the priority-N choices (M2M, so save first).
    for priority, profile in enumerate(profiles, start=1):
        profile.save()
        for item in UserChoice.objects.filter(priority=priority):
            profile.choices.add(item)
    return inputs, profiles, choices
class UserProfileShowTestCase(LiveServerTestCase):
    """User profiles and choices display properly."""

    def setUp(self):
        self.driver = webdriver.Firefox()
        # Bug fix: `super(...).setUp` was previously only referenced, never
        # called, so LiveServerTestCase's per-test setup was silently skipped.
        super(UserProfileShowTestCase, self).setUp()
        self.user = User(username='user1')
        self.user.set_password('pass')
        self.user.is_active = True
        self.client = Client()

    def tearDown(self):
        self.driver.refresh()
        self.driver.quit()
        super(UserProfileShowTestCase, self).tearDown()

    def test_show_profile_all(self):
        """Profiles are in the created profile list."""
        # .save() is here instead of setUp to save time
        self.user.save()
        login_user(self.driver, 'user1', 'pass')
        self.profiles = set_data(self.user)[1]
        self.driver.implicitly_wait(2)
        self.driver.get(TEST_DOMAIN_NAME + reverse('profile'))
        # Every fixture profile description appears on the profile page.
        for profile in self.profiles:
            self.assertIn(profile.description, self.driver.page_source)

    def test_show_profile_choices(self):
        """Test for all choices in each profile list."""
        # .save() is here instead of setUp to save time
        self.user.save()
        login_user(self.driver, 'user1', 'pass')
        self.profiles = set_data(self.user)[1]
        self.driver.implicitly_wait(2)
        # Visit each profile's detail page and check all its choices appear.
        for profile in self.profiles:
            self.driver.get(TEST_DOMAIN_NAME + reverse('profile'))
            link = self.driver.find_elements_by_link_text(
                profile.description)
            link[0].click()
            for choice in profile.choices.all():
                self.assertIn(choice.description, self.driver.page_source)
class UserProfileDownloadTestCase(LiveServerTestCase):
    """User profile downloading properly."""

    def setUp(self):
        self.driver = webdriver.Firefox()
        # Bug fix: `super(...).setUp` was previously only referenced, never
        # called, so LiveServerTestCase's per-test setup was silently skipped.
        super(UserProfileDownloadTestCase, self).setUp()
        self.user = User(username='user1')
        self.user.set_password('pass')
        self.user.is_active = True
        self.client = Client()

    def tearDown(self):
        self.driver.refresh()
        self.driver.quit()
        super(UserProfileDownloadTestCase, self).tearDown()

    def test_show_profile_choices(self):
        """Test that download link exists for all choices in each profile list."""
        # .save() is here instead of setUp to save time
        self.user.save()
        login_user(self.driver, 'user1', 'pass')
        self.profiles = set_data(self.user)[1]
        self.driver.implicitly_wait(2)
        # Visit each profile's detail page and look for its download link.
        for profile in self.profiles:
            self.driver.get(TEST_DOMAIN_NAME + reverse('profile'))
            link = self.driver.find_elements_by_link_text(
                profile.description)
            link[0].click()
            # TODO(review): searching for link text '' looks wrong — it likely
            # should name the actual download-link text; confirm against the
            # template and tighten this assertion.
            link = self.driver.find_elements_by_link_text('')
            self.assertTrue(link)
| |
"""
Getis and Ord G statistic for spatial autocorrelation
"""
__author__ = "Sergio J. Rey <srey@asu.edu>, Myunghwa Hwang <mhwang4@gmail.com> "
__all__ = ['G', 'G_Local']
from pysal.common import np, stats, math
from pysal.weights.spatial_lag import lag_spatial as slag
# Default number of random permutations used when computing pseudo p-values.
PERMUTATIONS = 999
class G:
    """
    Global G Autocorrelation Statistic

    Parameters
    ----------
    y : array (n,1)
        Attribute values
    w : W
        DistanceBand W spatial weights based on distance band
    permutations : int
        the number of random permutations for calculating pseudo p_values

    Attributes
    ----------
    y : array
        original variable
    w : W
        DistanceBand W spatial weights based on distance band
    permutation : int
        the number of permutations
    G : float
        the value of statistic
    EG : float
        the expected value of statistic
    VG : float
        the variance of G under normality assumption
    z_norm : float
        standard normal test statistic
    p_norm : float
        p-value under normality assumption (one-sided)
    sim : array
        (if permutations > 0)
        vector of G values for permutated samples
    p_sim : float
        p-value based on permutations (one-sided)
        null: spatial randomness
        alternative: the observed G is extreme it is either extremely high or extremely low
    EG_sim : float
        average value of G from permutations
    VG_sim : float
        variance of G from permutations
    seG_sim : float
        standard deviation of G under permutations.
    z_sim : float
        standardized G based on permutations
    p_z_sim : float
        p-value based on standard normal approximation from
        permutations (one-sided)

    Notes
    -----
    Moments are based on normality assumption.

    Examples
    --------
    >>> from pysal.weights.Distance import DistanceBand
    >>> import numpy
    >>> numpy.random.seed(10)

    Preparing a point data set

    >>> points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a weights object from points

    >>> w = DistanceBand(points,threshold=15)
    >>> w.transform = "B"

    Preparing a variable

    >>> y = numpy.array([2, 3, 3.2, 5, 8, 7])

    Applying Getis and Ord G test

    >>> g = G(y,w)

    Examining the results

    >>> print "%.8f" % g.G
    0.55709779

    >>> print "%.4f" % g.p_norm
    0.1729
    """
    def __init__(self, y, w, permutations=PERMUTATIONS):
        self.n = len(y)
        self.y = y
        # The global G statistic is defined for binary weights; force "B".
        w.transform = "B"
        self.w = w
        self.permutations = permutations
        # Computes EG, VG (and intermediate b0..b4, EG2) from w and y.
        self.__moments()
        self.y2 = y * y
        y = y.reshape(len(y), 1)  # Ensure that y is an n by 1 vector, otherwise y*y.T == y*y
        # Denominator: sum of all cross products y_i * y_j with i != j.
        self.den_sum = (y * y.T).sum() - (y * y).sum()
        self.G = self.__calc(self.y)
        self.z_norm = (self.G - self.EG) / math.sqrt(self.VG)
        self.p_norm = 1.0 - stats.norm.cdf(np.abs(self.z_norm))
        if permutations:
            # Reference distribution: G recomputed on random permutations of y.
            sim = [self.__calc(np.random.permutation(self.y))
                   for i in xrange(permutations)]
            self.sim = sim = np.array(sim)
            above = sim >= self.G
            larger = sum(above)
            # One-sided pseudo p-value: take the smaller tail.
            if (self.permutations - larger) < larger:
                larger = self.permutations - larger
            self.p_sim = (larger + 1.0) / (permutations + 1.)
            self.EG_sim = sum(sim) / permutations
            self.seG_sim = sim.std()
            self.VG_sim = self.seG_sim ** 2
            self.z_sim = (self.G - self.EG_sim) / self.seG_sim
            self.p_z_sim = 1. - stats.norm.cdf(np.abs(self.z_sim))

    def __moments(self):
        # Expectation and variance of G under the normality assumption,
        # following Getis & Ord (1992).
        y = self.y
        n = self.n
        w = self.w
        n2 = n * n
        s0 = w.s0
        self.EG = s0 / (n * (n - 1))
        s02 = s0 * s0
        s1 = w.s1
        s2 = w.s2
        # b0..b4: polynomial coefficients in the E[G^2] expression.
        b0 = (n2 - 3 * n + 3) * s1 - n * s2 + 3 * s02
        b1 = (-1.) * ((n2 - n) * s1 - 2 * n * s2 + 6 * s02)
        b2 = (-1.) * (2 * n * s1 - (n + 3) * s2 + 6 * s02)
        b3 = 4 * (n - 1) * s1 - 2 * (n + 1) * s2 + 8 * s02
        b4 = s1 - s2 + s02
        self.b0 = b0
        self.b1 = b1
        self.b2 = b2
        self.b3 = b3
        self.b4 = b4
        y2 = y * y
        y3 = y * y2
        y4 = y2 * y2
        EG2 = (b0 * (sum(
            y2) ** 2) + b1 * sum(y4) + b2 * (sum(y) ** 2) * sum(y2))
        EG2 += b3 * sum(y) * sum(y3) + b4 * (sum(y) ** 4)
        EG2NUM = EG2
        EG2DEN = (((sum(y) ** 2 - sum(y2)) ** 2) * n * (n - 1) * (
            n - 2) * (n - 3))
        self.EG2 = EG2NUM / EG2DEN
        self.VG = self.EG2 - self.EG ** 2

    def __calc(self, y):
        # G = sum_i y_i * (Wy)_i / sum_{i != j} y_i * y_j.
        yl = slag(self.w, y)
        self.num = y * yl
        return self.num.sum() / self.den_sum
class G_Local:
    """
    Generalized Local G Autocorrelation Statistic

    Parameters
    ----------
    y : array
        variable
    w : W
        DistanceBand, weights instance that is based on threshold distance
        and is assumed to be aligned with y
    transform : {'R', 'B'}
        the type of w, either 'B' (binary) or 'R' (row-standardized)
    permutations : int
        the number of random permutations for calculating
        pseudo p values
    star : boolean
        whether or not to include focal observation in sums (default: False)

    Attributes
    ----------
    y : array
        original variable
    w : DistanceBand W
        original weights object
    permutations: int
        the number of permutations
    Gs : array
        of floats, the value of the orginal G statistic in Getis & Ord (1992)
    EGs : float
        expected value of Gs under normality assumption
        the values is scalar, since the expectation is identical
        across all observations
    VGs : array
        of floats, variance values of Gs under normality assumption
    Zs : array
        of floats, standardized Gs
    p_norm : array
        of floats, p-value under normality assumption (one-sided)
        for two-sided tests, this value should be multiplied by 2
    sim : array
        of arrays of floats (if permutations>0), vector of I values
        for permutated samples
    p_sim : array
        of floats, p-value based on permutations (one-sided)
        null - spatial randomness
        alternative - the observed G is extreme it is either extremely high or extremely low
    EG_sim : array
        of floats, average value of G from permutations
    VG_sim : array
        of floats, variance of G from permutations
    seG_sim : array
        of floats, standard deviation of G under permutations.
    z_sim : array
        of floats, standardized G based on permutations
    p_z_sim : array
        of floats, p-value based on standard normal approximation from
        permutations (one-sided)

    Notes
    -----
    To compute moments of Gs under normality assumption,
    PySAL considers w is either binary or row-standardized.
    For binary weights object, the weight value for self is 1
    For row-standardized weights object, the weight value for self is
    1/(the number of its neighbors + 1).

    References
    ----------
    Getis, A. and Ord., J.K. (1992) The analysis of spatial association by use of
    distance statistics. Geographical Analysis, 24(3):189-206

    Ord, J.K. and Getis, A. (1995) Local spatial autocorrelation statistics:
    distributional issues and an application. Geographical Analysis, 27(4):286-306

    Getis, A. and Ord, J. K. (1996) Local spatial statistics: an overview,
    in Spatial Analysis: Modelling in a GIS Environment, edited by Longley, P.
    and Batty, M.

    Examples
    --------
    >>> from pysal.weights.Distance import DistanceBand
    >>> import numpy
    >>> numpy.random.seed(10)

    Preparing a point data set

    >>> points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a weights object from points

    >>> w = DistanceBand(points,threshold=15)

    Prepareing a variable

    >>> y = numpy.array([2, 3, 3.2, 5, 8, 7])

    Applying Getis and Ord local G test using a binary weights object

    >>> lg = G_Local(y,w,transform='B')

    Examining the results

    >>> lg.Zs
    array([-1.0136729 , -0.04361589,  1.31558703, -0.31412676,  1.15373986,
            1.77833941])
    >>> lg.p_sim[0]
    0.10100000000000001

    >>> numpy.random.seed(10)

    Applying Getis and Ord local G* test using a binary weights object

    >>> lg_star = G_Local(y,w,transform='B',star=True)

    Examining the results

    >>> lg_star.Zs
    array([-1.39727626, -0.28917762,  0.65064964, -0.28917762,  1.23452088,
            2.02424331])
    >>> lg_star.p_sim[0]
    0.10100000000000001

    >>> numpy.random.seed(10)

    Applying Getis and Ord local G test using a row-standardized weights object

    >>> lg = G_Local(y,w,transform='R')

    Examining the results

    >>> lg.Zs
    array([-0.62074534, -0.01780611,  1.31558703, -0.12824171,  0.28843496,
            1.77833941])
    >>> lg.p_sim[0]
    0.10100000000000001

    >>> numpy.random.seed(10)

    Applying Getis and Ord local G* test using a row-standardized weights object

    >>> lg_star = G_Local(y,w,transform='R',star=True)

    Examining the results

    >>> lg_star.Zs
    array([-0.62488094, -0.09144599,  0.41150696, -0.09144599,  0.24690418,
            1.28024388])
    >>> lg_star.p_sim[0]
    0.10100000000000001
    """
    def __init__(self, y, w, transform='R', permutations=PERMUTATIONS, star=False):
        self.n = len(y)
        self.y = y
        self.w = w
        # Remember the caller's transform so calc() can restore it at the end.
        self.w_original = w.transform
        self.w.transform = self.w_transform = transform.lower()
        self.permutations = permutations
        self.star = star
        # Computes Gs, EGs, VGs and Zs.
        self.calc()
        self.p_norm = np.array(
            [1 - stats.norm.cdf(np.abs(i)) for i in self.Zs])
        if permutations:
            # Conditional randomization: per-observation reference distribution.
            self.__crand()
            sim = np.transpose(self.rGs)
            above = sim >= self.Gs
            larger = sum(above)
            # One-sided pseudo p-values: take the smaller tail per observation.
            low_extreme = (self.permutations - larger) < larger
            larger[low_extreme] = self.permutations - larger[low_extreme]
            self.p_sim = (larger + 1.0) / (permutations + 1)
            self.sim = sim
            self.EG_sim = sim.mean()
            self.seG_sim = sim.std()
            self.VG_sim = self.seG_sim * self.seG_sim
            self.z_sim = (self.Gs - self.EG_sim) / self.seG_sim
            self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))

    def __crand(self):
        # Conditional randomization: for each observation i, draw its
        # neighbors' values from random permutations of the other n-1 values.
        y = self.y
        rGs = np.zeros((self.n, self.permutations))
        n_1 = self.n - 1
        rid = range(n_1)
        prange = range(self.permutations)
        # k = largest neighbor count + 1; each permutation keeps only k draws.
        k = self.w.max_neighbors + 1
        rids = np.array([np.random.permutation(rid)[0:k] for i in prange])
        ids = np.arange(self.w.n)
        ido = self.w.id_order  # NOTE: not used below in this method
        wc = self.__getCardinalities()
        if self.w_transform == 'r':
            # Row-standardized: divide by the neighbor count (+1 with star).
            den = np.array(wc) + self.star
        else:
            den = np.ones(self.w.n)
        for i in range(self.w.n):
            idsi = ids[ids != i]
            np.random.shuffle(idsi)
            # yi_star is y[i] when the focal value is included (G*), else 0.
            yi_star = y[i] * self.star
            wci = wc[i]
            rGs[i] = (y[idsi[rids[:, 0:wci]]]).sum(1) + yi_star
            rGs[i] = (np.array(rGs[i]) / den[i]) / (
                self.y_sum - (1 - self.star) * y[i])
        self.rGs = rGs

    def __getCardinalities(self):
        # Neighbor counts aligned with the weights object's id order.
        ido = self.w.id_order
        self.wc = np.array(
            [self.w.cardinalities[ido[i]] for i in range(self.n)])
        return self.wc

    def calc(self):
        # Observed statistics Gs plus analytical moments EGs, VGs and Zs.
        y = self.y
        y2 = y * y
        self.y_sum = y_sum = sum(y)
        y2_sum = sum(y2)
        if not self.star:
            # Local G: exclude the focal observation from both sums.
            yl = 1.0 * slag(self.w, y)
            ydi = y_sum - y
            self.Gs = yl / ydi
            N = self.n - 1
            yl_mean = ydi / N
            s2 = (y2_sum - y2) / N - (yl_mean) ** 2
        else:
            # Local G*: include the focal observation.
            self.w.transform = 'B'
            yl = 1.0 * slag(self.w, y)
            yl += y
            if self.w_transform == 'r':
                yl = yl / (self.__getCardinalities() + 1.0)
            self.Gs = yl / y_sum
            N = self.n
            yl_mean = y.mean()
            s2 = y.var()
        EGs_num, VGs_num = 1.0, 1.0
        if self.w_transform == 'b':
            # Binary weights: moments scale with each observation's
            # neighbor count W (see Notes in the class docstring).
            W = self.__getCardinalities()
            W += self.star
            EGs_num = W * 1.0
            VGs_num = (W * (1.0 * N - W)) / (1.0 * N - 1)
        self.EGs = (EGs_num * 1.0) / N
        self.VGs = (VGs_num) * (1.0 / (N ** 2)) * ((s2 * 1.0) / (yl_mean ** 2))
        self.Zs = (self.Gs - self.EGs) / np.sqrt(self.VGs)
        # Restore the transform the caller's weights object arrived with.
        self.w.transform = self.w_original
| |
#cython: profile=False
#cython: cdivision=True
#cython: wraparound=False
#cython: boundscheck=False
# filename: lcacx.pyx
import numpy as np
#################
## Cython mode ##
#################
cimport numpy as np
cimport cython
CTYPE = np.complex
FTYPE = np.float
ctypedef np.complex_t CTYPE_t
ctypedef double complex DCTYPE_t
ctypedef np.float_t FTYPE_t
ctypedef np.int_t ITYPE_t
def lca(np.ndarray[DCTYPE_t, ndim=3] Z_in not None,
        np.ndarray[FTYPE_t, ndim=2] wrf_in not None):
    """Adaptive window selection over a beamformed image cube.

    For each pixel of the (Ny, Nx, Nw) complex cube Z_in, select the
    window index w (last axis) whose output power, accumulated over the
    neighbourhood weighted by the 2-D filter wrf_in, is minimal, and
    return (new_img, new_win): the selected complex pixel values and the
    chosen window indices.
    """
    cdef int Ny
    cdef int Nx
    cdef int Nw
    cdef np.ndarray[DCTYPE_t, ndim=3] Z
    cdef int Y_AVG
    cdef int Y_MID
    cdef int X_AVG
    cdef int X_MID
    cdef np.ndarray[FTYPE_t, ndim=2] wrf
    # NOTE(review): new_y, new_w, img_y_segment_abs and img_y_segment_abs_sum
    # are allocated below but never used afterwards — dead buffers.
    cdef np.ndarray[DCTYPE_t, ndim=1] new_y
    cdef np.ndarray[ITYPE_t, ndim=1] new_w
    cdef np.ndarray[FTYPE_t, ndim=2] img_y_segment_abs
    cdef np.ndarray[FTYPE_t, ndim=1] img_y_segment_abs_sum
    cdef FTYPE_t w_sum_min
    cdef np.ndarray[FTYPE_t, ndim=1] w_sum
    cdef np.ndarray[DCTYPE_t, ndim=2] img_y
    cdef np.ndarray[FTYPE_t, ndim=2] img_y_abs
    cdef np.ndarray[DCTYPE_t, ndim=2] new_img
    cdef np.ndarray[ITYPE_t, ndim=2] new_win
    cdef int x,y,w,xx,yy
    # For XY-averaging:
    cdef np.ndarray[FTYPE_t, ndim=3] img_abs
    #################
    ## Python mode ##
    #################
    # Alternative pure-Python signature kept for reference:
    #def lca(Z_in, wrf_in):
    ## End ##
    Ny = Z_in.shape[0]
    Nx = Z_in.shape[1]
    Nw = Z_in.shape[2]
    Z = Z_in
    # The filter must be 2-D; a (1,1) filter of value 1 means "no filtering".
    if wrf_in.ndim != 2:
        print "You must specify a 2D 'wrf' filter. A (1,1) filter with the value 1 equals no filtering."
        return
    Y_AVG = wrf_in.shape[0]
    Y_MID = (Y_AVG-1)/2
    X_AVG = wrf_in.shape[1]
    X_MID = (X_AVG-1)/2
    wrf = wrf_in
    # The filter must have odd number of rows/columns
    if Y_AVG%2==0 or X_AVG%2==0:
        print "The 'wrf' filter must have odd number of rows/columns."
        return
    # If only range-values are supplied, and these are all 1's, then set the
    # 'Y_ONLY_ONES' flag which will be used later to simplify computations.
    if X_AVG == 1:
        Y_ONLY_ONES = True
        for y in range(Y_AVG):
            if wrf[y,0] != 1:
                Y_ONLY_ONES = False
    else:
        Y_ONLY_ONES = False
    # Working buffers (see the review note on the unused ones above).
    new_y = np.zeros((Ny,), dtype=complex)
    new_w = np.zeros((Ny,), dtype=int)
    img_y_segment_abs = np.zeros((Nw,2*Y_AVG), dtype=float)
    img_y_segment_abs_sum = np.zeros((Nw,), dtype=float)
    w_sum = np.zeros((Nw,), dtype=float)
    w_sum_min = 0
    img_y = np.zeros((Nw,Ny+2*Y_MID), dtype=complex)
    img_y_abs = np.zeros((Nw,Ny+2*Y_MID), dtype=float)
    new_img = np.zeros((Ny,Nx), dtype=complex)
    new_win = np.zeros((Ny,Nx), dtype=int)
    # For XY-averaging:
    img_abs = np.zeros((Nw,Ny,Nx), dtype=float)
    # Select the window that yield the least power
    if Y_AVG == 1 and X_AVG == 1 and wrf[0,0]:
        # Trivial case: no neighbourhood averaging — pick, per pixel, the
        # window with the smallest magnitude directly.
        # NOTE(review): the first two assignments of l and m are dead; the
        # meshgrid line immediately overwrites them.
        l = range(0,Ny)
        m = range(0,Nx)
        l,m = np.meshgrid(range(0,Nx),range(0,Ny))
        selected_window = np.abs(Z).argmin(2)
        return Z[m,l,selected_window], selected_window
    ###############################
    # Perform 'window averaging'. #
    ###############################
    # A way to make the beamformer estimate a pixel more accurately, the beamformer output
    # may be computed for a 'window' of pixels around the one we wish to image, and the window
    # that gave the overall lowest output power may be applied to the center pixel.
    #
    # It is common that the averaging window is comprised of only ones, and no averaging is
    # required in azimuth. That mode is handled first, and will be less computationally
    # intensive than the next 'else if' which handles arbitrary averaging windows.
    elif Y_ONLY_ONES:
        # Iterate over all azimuth coordinates
        for x in range (Nx):
            # Compute the power of each range pixel (square them)
            for y in range(Ny):
                for w in range(Nw):
                    img_y[w,y] = Z[y,x,w]
                    img_y_abs[w,y] = img_y[w,y].real**2 + img_y[w,y].imag**2
            # Compute the beamformer output for the first y-segment
            for w in range(Nw):
                # NOTE(review): w_sum_min is reset to 0 on every w-iteration,
                # so the "< w_sum_min" comparison below can only succeed via
                # the w==0 clause — the reset likely belongs before this loop;
                # confirm against the reference implementation.
                w_sum_min = 0
                w_sum[w] = 0
                for y in range(Y_AVG):
                    w_sum[w] += img_y_abs[w,y]
                # Select the window that yielded the minimum output power of the beamformer
                if w_sum[w] < w_sum_min or w==0:
                    w_sum_min = w_sum[w]
                    # NOTE(review): indexing row Y_AVG looks suspicious — the
                    # center of the first segment is Y_MID; confirm.
                    new_win[Y_AVG,x] = w
                    new_img[Y_AVG,x] = img_y[w,Y_AVG]
            # Select a range segment:
            for y in range(1+Y_MID,Ny-Y_MID):
                # Compute the beamformer output for each of the windows
                for w in range(Nw):
                    # Sliding-sum update: add the entering pixel, drop the
                    # leaving one.
                    w_sum[w] += img_y_abs[w,y+Y_MID] - img_y_abs[w,y-Y_MID-1]
                    # Select the window that yielded the minimum output power of the beamformer
                    if w_sum[w] < w_sum_min or w==0:
                        w_sum_min = w_sum[w]
                        new_win[y,x] = w
                        new_img[y,x] = img_y[w,y]
        return new_img, new_win
    # Handle arbitrary window functions:
    elif Y_AVG != 0 and X_AVG != 0:
        # Compute the image absolute value
        for y in range(Ny):
            for x in range(Nx):
                for w in range(Nw):
                    img_abs[w,y,x] = Z[y,x,w].real**2 + Z[y,x,w].imag**2
        # for w in range(Nw):
        #     img_abs[w,:,:] = Z[:,:,w]**2
        # Select a range segment:
        for y in range(Y_MID,Ny-Y_MID):
            # Select an azimuth segment:
            for x in range(X_MID,Nx-X_MID):
                # Compute the accumulated beamformer output for each of the windows
                for w in range(Nw):
                    w_sum[w] = 0
                    # Weighted sum of pixel powers over the full 2-D window.
                    for yy in range(2*Y_MID+1):
                        for xx in range(2*X_MID+1):
                            w_sum[w] += img_abs[w,y+yy-Y_MID,x+xx-X_MID]*wrf[yy,xx]
                    # Select the window that yielded the minimum output power of the beamformer
                    if w_sum[w] < w_sum_min or w==0:
                        w_sum_min = w_sum[w]
                        new_win[y,x] = w
                        new_img[y,x] = Z[y,x,w]
        return new_img, new_win
| |
from datetime import timezone
from unittest import mock
from unittest.mock import patch
import pytest
from requests.exceptions import MissingSchema, InvalidSchema, SSLError, InvalidURL, HTTPError
from requests import Response
from CommonServerPython import *
API_TOKEN = 'API Token for FireEye'
CONTENT_TYPE_JSON = 'application/json'
SAMPLE_URL = 'https://sample.api.com'
AUTHENTICATION_RESP_HEADER = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': CONTENT_TYPE_JSON
}
MOCK_INTEGRATION_CONTEXT = {
'api_token': API_TOKEN,
'valid_until': time.time() + 900
}
PARAMS = {
'url': SAMPLE_URL,
'fetch_limit': 10,
'firstFetchTimestamp': '1 hour'
}
MOCK_TEST_URL_SUFFIX = '/test/url/suffix'
ALERT_ID_TYPE_ERROR = 'The given value for alert_id is invalid. Expected integer value.'
ALERT_DETAILS_REPORT = 'Alert Details Report'
CONTENT_TYPE_ZIP = 'application/zip'
''' HELPER FUNCTION'''
@pytest.fixture()
def client():
    """Build a FireEyeNX Client pointed at the sample URL for the tests."""
    from FireEyeNX import Client

    client_kwargs = {
        'base_url': SAMPLE_URL,
        'verify': False,
        'proxy': False,
        'auth': ('username', 'password'),
        'request_timeout': 60,
    }
    return Client(**client_kwargs)
def mock_http_response(status=200, headers=None, json_data=None, raise_for_status=None, text=None, content=None):
    """Build a Mock that mimics the parts of requests.Response the code uses.

    Populates status_code, text, content and ok; optionally headers, a
    json() method returning *json_data*, and a raise_for_status() that
    raises *raise_for_status* when supplied.
    """
    response = mock.Mock()
    response.status_code = status
    response.text = text
    response.content = content
    # Mirror requests' semantics: ok is true for any non-error status.
    response.ok = status < 400
    response.raise_for_status = mock.Mock()
    if raise_for_status:
        response.raise_for_status.side_effect = raise_for_status
    if headers:
        response.headers = headers
    if json_data:
        response.json = mock.Mock(return_value=json_data)
    return response
class MockResponse:
    """Lightweight stand-in for requests.Response used by these tests.

    NOTE(review): unlike requests.Response, ``text`` and ``json`` are plain
    methods here (requests exposes ``text`` as a property); callers must
    invoke them — confirm before reusing this class elsewhere.
    """

    def __init__(self, content, headers, status_code):
        self.content = content
        self.status_code = status_code
        self.headers = headers
        # Derive ``ok`` the way requests does, so tests no longer need to
        # patch it on after construction (some previously did `resp.ok = True`).
        self.ok = status_code < 400

    def text(self):
        # Raw body exactly as handed to the constructor.
        return self.content

    def json(self):
        # Decode the stored body as JSON; raises ValueError on bad JSON.
        return json.loads(self.content)

    def raise_for_status(self):
        # Mirror requests: raise only for non-200 in these tests.
        if self.status_code != 200:
            raise HTTPError('test')
''' Unit Test Cases '''
@patch('FireEyeNX.Client.http_request')
@patch('demistomock.getIntegrationContext')
@patch('demistomock.setIntegrationContext')
def test_get_api_token_when_not_found_in_integration_context(
        mocker_set_context, mocker_get_context, mock_request, client):
    """
    With an empty integration context, get_api_token must authenticate via
    the API (X-FeApi-Token header) and persist the new token exactly once.
    """
    mocker_get_context.return_value = {}
    mocker_set_context.return_value = {}
    mock_request.return_value = mock_http_response(
        status=200, headers=AUTHENTICATION_RESP_HEADER, text='')

    api_token = client.get_api_token()

    assert api_token == AUTHENTICATION_RESP_HEADER['X-FeApi-Token']
    assert mocker_set_context.call_count == 1
@patch('FireEyeNX.Client._http_request')
@patch('demistomock.getIntegrationContext')
@patch('demistomock.setIntegrationContext')
def test_get_api_token_when_found_in_integration_context(
        mocker_set_context, mocker_get_context, mock_request, client):
    """
    With a still-valid token cached in the integration context,
    get_api_token must return it without writing the context again.
    """
    mocker_get_context.return_value = MOCK_INTEGRATION_CONTEXT
    mocker_set_context.return_value = {}
    mock_request.return_value = mock_http_response(
        status=200, headers=AUTHENTICATION_RESP_HEADER, text='')

    api_token = client.get_api_token()

    assert api_token == AUTHENTICATION_RESP_HEADER['X-FeApi-Token']
    assert mocker_set_context.call_count == 0
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_invalid_schema_error(mock_base_http_request, client):
    """An InvalidSchema from the transport surfaces as a ValueError with a
    user-friendly message."""
    mock_base_http_request.side_effect = InvalidSchema
    with pytest.raises(ValueError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'Invalid API URL. Supplied schema is invalid, supports http(s).'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_proxy_error(mock_base_http_request, client):
    """A DemistoException mentioning 'Proxy Error' is re-raised as a
    ConnectionError carrying proxy troubleshooting guidance."""
    mock_base_http_request.side_effect = DemistoException('Proxy Error')
    with pytest.raises(ConnectionError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    expected = 'Proxy Error - cannot connect to proxy. Either try clearing the \'Use system proxy\'' \
               ' check-box or check the host, authentication details and connection details for the proxy.'
    assert str(e.value) == expected
@patch('FireEyeNX.Client._http_request')
def test_http_request_connection_error(mock_base_http_request, client):
    """A 'ConnectionError' DemistoException maps to a connectivity-failure
    message naming the internet connection and API URL."""
    mock_base_http_request.side_effect = DemistoException('ConnectionError')
    with pytest.raises(ConnectionError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'Connectivity failed. Check your internet connection or the API URL.'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_read_timeout_error(mock_base_http_request, client):
    """A 'ReadTimeoutError' DemistoException maps to the request-timeout
    message pointing at the configured HTTP(S) timeout parameter."""
    mock_base_http_request.side_effect = DemistoException('ReadTimeoutError')
    with pytest.raises(ConnectionError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'Request timed out. Check the configured HTTP(S) Request Timeout (in seconds) value.'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_ssl_error(mock_base_http_request, client):
    """An 'SSLError' DemistoException is re-raised as SSLError advising the
    user to trust the certificate."""
    mock_base_http_request.side_effect = DemistoException('SSLError')
    with pytest.raises(SSLError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    expected = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox ' \
               'in the integration configuration.'
    assert str(e.value) == expected
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_missing_schema_error(mock_base_http_request, client):
    """A MissingSchema from the transport surfaces as a ValueError telling
    the user no URL schema was supplied."""
    mock_base_http_request.side_effect = MissingSchema
    with pytest.raises(ValueError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'Invalid API URL. No schema supplied: http(s).'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_invalid_url_error(mock_base_http_request, client):
    """An InvalidURL from the transport surfaces as a plain
    'Invalid API URL.' ValueError."""
    mock_base_http_request.side_effect = InvalidURL
    with pytest.raises(ValueError) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'Invalid API URL.'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_other_demisto_exception(mock_base_http_request, client):
    """Any DemistoException not matching a known pattern is propagated with
    its original message."""
    mock_base_http_request.side_effect = DemistoException('custom')
    with pytest.raises(Exception) as e:
        client.http_request('GET', MOCK_TEST_URL_SUFFIX)
    assert str(e.value) == 'custom'
def test_main_success(mocker):
"""
When main function called test function should call.
"""
import FireEyeNX
mocker.patch.object(demisto, 'params', return_value=PARAMS)
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(FireEyeNX, 'test_function', return_value='ok')
FireEyeNX.main()
assert FireEyeNX.test_function.called
def test_main_all_argunment_should_strip(mocker):
import FireEyeNX
mocker.patch.object(demisto, 'params', return_value=PARAMS)
mocker.patch.object(demisto, 'command', return_value='fireeye-nx-get-alerts')
mocker.patch.object(FireEyeNX, 'get_alerts_command', return_value='ok')
args = {
'malware_name': ' malware_name ',
'malware_type': ' domain_match ',
'url': SAMPLE_URL
}
actual_output = {
'malware_name': 'malware_name',
'malware_type': 'domain_match',
'url': SAMPLE_URL
}
mocker.patch.object(demisto, 'args', return_value=args)
FireEyeNX.main()
assert args == actual_output
def test_main_when_fetch_incident_called_it_should_called_fetch_incident_method(mocker):
import FireEyeNX
mocker.patch.object(demisto, 'params', return_value=PARAMS)
mocker.patch.object(demisto, 'command', return_value='fetch-incidents')
mocker.patch.object(FireEyeNX, 'fetch_incidents', return_value='ok')
mocker.patch.object(demisto, 'setLastRun', return_value='')
mocker.patch.object(demisto, 'incidents', return_value='')
FireEyeNX.main()
@patch('FireEyeNX.return_error')
def test_main_failure(mock_return_error, mocker):
    """
    When main() hits an unexpected exception it should report it exactly
    once via return_error with an 'Error: '-prefixed message.
    """
    import FireEyeNX
    # Silence demisto.error logging emitted on the failure path.
    mocker.patch.object(demisto, 'error')
    mocker.patch.object(demisto, 'params', return_value=PARAMS)
    mocker.patch.object(demisto, 'command', return_value='test-module')
    # Force the test-module handler to blow up.
    mocker.patch.object(FireEyeNX, 'test_function', side_effect=Exception)
    FireEyeNX.main()
    mock_return_error.assert_called_once_with('Error: ')
@patch('FireEyeNX.BaseClient._http_request')
def test_module_success_without_test_fetch_incident(mock_request, client):
"""
When test_function called with status code 200 without is_fetch=false, it successful return ok.
"""
from FireEyeNX import test_function
mock_request.return_value = mock_http_response(status=200, headers=AUTHENTICATION_RESP_HEADER, text='')
first_fetch_time = '12 hours'
fetch_limit = '1'
malware_type = ''
is_fetch = False
fetch_type = 'Alert'
mvx_correlated = False
replace_alert_url = False
instance_url = SAMPLE_URL
fetch_artifacts = False
resp = test_function(client=client, first_fetch_time=first_fetch_time, fetch_limit=fetch_limit,
malware_type=malware_type, is_fetch=is_fetch, fetch_type=fetch_type,
mvx_correlated=mvx_correlated, replace_alert_url=replace_alert_url, instance_url=instance_url,
fetch_artifacts=fetch_artifacts)
assert resp == 'ok'
@patch('FireEyeNX.get_incidents_for_alert')
@patch('FireEyeNX.BaseClient._http_request')
def test_module_success_with_fetch_incident(mock_request, mock_get_alert, client):
    """
    test_function with is_fetch=True should run one fetch cycle and return 'ok'.

    NOTE: @patch injects mocks bottom-up, so the first parameter is the
    BaseClient._http_request mock and the second is get_incidents_for_alert;
    the original signature had the two names swapped, configuring each mock
    with the other's data.
    """
    from FireEyeNX import test_function
    # No alerts -> no incidents created during the test fetch.
    mock_get_alert.return_value = []
    mock_last_run = {
        'start_time': datetime.now().replace(tzinfo=timezone.utc).timestamp()
    }
    # Successful authentication response for the token request.
    mock_request.return_value = mock_http_response(status=200, headers=AUTHENTICATION_RESP_HEADER, text='')
    first_fetch_time = '12 hours'
    fetch_limit = '1'
    malware_type = None
    is_fetch = True
    fetch_type = 'Alert'
    mvx_correlated = False
    replace_alert_url = False
    instance_url = SAMPLE_URL
    fetch_artifacts = False
    resp = test_function(client=client, first_fetch_time=first_fetch_time, fetch_limit=fetch_limit,
                         malware_type=malware_type,
                         is_fetch=is_fetch, fetch_type=fetch_type,
                         mvx_correlated=mvx_correlated, replace_alert_url=replace_alert_url, instance_url=instance_url,
                         fetch_artifacts=fetch_artifacts, last_run=mock_last_run)
    assert resp == 'ok'
def test_validate_date_range_failure():
"""
When validate_date_range_failure() method call and date more then 48 hour then raise value error.
"""
from FireEyeNX import validate_date_range
fetch_time = '49 hours'
with pytest.raises(ValueError) as e:
validate_date_range(fetch_time)
assert str(e.value) == 'The First fetch time interval should be up to 48 hour as per API limitation.'
def test_fetch_limit_when_valid_value_success(mocker):
"""
When valid fetch_limit is given, test should pass.
"""
from FireEyeNX import get_fetch_limit
mocker.patch.object(demisto, 'params', return_value=PARAMS)
fetch_limit = get_fetch_limit(fetch_limit='')
assert fetch_limit == 50
@pytest.mark.parametrize('inputs', ['0', '201', 'dfdf'])
def test_fetch_limit_when_invalid_value_should_raise_exception(mocker, inputs):
"""
When invalid fetch limit is passed, should raises value error.
"""
from FireEyeNX import get_fetch_limit
mocker.patch.object(demisto, 'params', return_value={'fetch_limit': inputs})
with pytest.raises(ValueError) as e:
get_fetch_limit(inputs)
assert str(e.value) == 'Value of Fetch Limit should be an integer and between range 1 to 200.'
def test_command_called_from_main_success(mocker, client):
"""
When main function is called get_reports_command should be called if that command is triggered.
"""
import FireEyeNX
mocker.patch.object(demisto, 'command', return_value='fireeye-nx-get-reports')
mocker.patch.object(FireEyeNX, 'get_reports_command', return_value='No report contents were '
'found for the given argument(s).')
FireEyeNX.main()
assert FireEyeNX.get_reports_command.called
@patch('FireEyeNX.Client.http_request')
def test_get_artifacts_metadata_by_alert_command_invalid_uuid(mock_request, client):
    """
    fireeye-nx-get-artifacts-metadata-by-alert returns the 'no artifacts'
    message when the API reports an empty artifactsInfoList.

    NOTE: the patch decorator injects its mock as the first argument; the
    original signature took only ``client``, so the mock shadowed the real
    client fixture and the command never exercised the real Client object.
    """
    from FireEyeNX import get_artifacts_metadata_by_alert_command
    # Empty artifact list simulates an unknown/invalid uuid.
    mock_request.return_value = {
        'artifactsInfoList': []
    }
    args = {
        'uuid': 'abc-dsh-didA'
    }
    return_value = get_artifacts_metadata_by_alert_command(client, args)
    assert return_value == 'No artifacts metadata were found for the given argument(s).'
@patch('FireEyeNX.Client.http_request')
def test_get_artifacts_metadata_by_alert_command_success(mock_request, client):
    """
    fireeye-nx-get-artifacts-metadata-by-alert returns raw response, context
    output and human readable matching the recorded fixture files.

    NOTE: the patch decorator injects its mock as the first argument; the
    original signature took only ``client``, so the mock shadowed the real
    client fixture.
    """
    from FireEyeNX import get_artifacts_metadata_by_alert_command
    args = {
        'uuid': 'test'
    }
    with open('TestData/get_artifacts_metadata_by_alert_response.json') as f:
        expected_res = json.load(f)
    mock_request.return_value = expected_res
    cmd_res = get_artifacts_metadata_by_alert_command(client, args)
    with open('TestData/get_artifacts_metadata_by_alert_context.json', encoding='utf-8') as f:
        expected_ec = json.load(f)
    with open('TestData/get_artifacts_metadata.md') as f:
        expected_hr = f.read()
    assert cmd_res.raw_response == expected_res
    assert cmd_res.outputs == expected_ec
    assert cmd_res.readable_output == expected_hr
@patch('FireEyeNX.BaseClient._http_request')
@pytest.mark.parametrize('args', [
{
'report_type': ALERT_DETAILS_REPORT,
'type': 'pdf',
'time_frame': 'between',
'start_time': '2020-01-29',
'end_time': '2020-02-29T23:59:59+13:00',
'infection_id': 'rt', 'infection_type': 'all'
},
{
'report_type': ALERT_DETAILS_REPORT,
'type': 'pdf',
'time_frame': 'between',
'start_time': '2020-01-29T23:59:59+13:01',
'end_time': '2020-02-29',
'infection_id': 'rt', 'infection_type': 'all'
}
])
def test_get_reports_success(mock_request, args, client):
"""
When fireeye-nx-get-reports command execute and passed valid arguments, it should be successful.
"""
from FireEyeNX import get_reports_command
with open('TestData/get_reports_response.pdf', encoding='utf-8') as f:
expected_res = f.read()
headers = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': 'application/pdf',
'Content-Length': 56
}
mock_request.return_value = mock_http_response(status=200, headers=headers, content=expected_res)
result = get_reports_command(client, args=args)
assert result.get('File', '') != ''
assert result.get('FileID', '') != ''
@patch('FireEyeNX.BaseClient._http_request')
def test_get_reports_no_records_found(mock_request, client):
"""
When fireeye-nx-get-reports command returns empty response then corresponding message should be populated.
"""
from FireEyeNX import get_reports_command
with open('TestData/get_reports_response.pdf', encoding='utf-8') as f:
expected_res = f.read()
headers = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': 'application/pdf',
'Content-Length': 0
}
mock_request.return_value = mock_http_response(status=200, headers=headers, content=expected_res)
args = {
'report_type': 'IPS Top N Attackers Report',
'limit': 56,
'interface': 'C',
'type': 'csv'
}
result = get_reports_command(client, args=args)
assert result == 'No report contents were found for the given argument(s).'
def test_reports_command_invalid_report_type(client):
"""
When fireeye-nx-get-reports command is provided invalid report type argument
it should give an error message.
"""
from FireEyeNX import get_reports_params
args = {
'report_type': 'XYZ'
}
with pytest.raises(ValueError) as e:
get_reports_params(args=args)
assert str(e.value) == 'The given value for report_type is invalid.'
def test_reports_command_invalid_output_type(client):
"""
When fireeye-nx-get-reports command is provided invalid output type argument
it should give an error message.
"""
from FireEyeNX import get_reports_params
args = {
'report_type': ALERT_DETAILS_REPORT,
'type': 'csv'
}
with pytest.raises(ValueError) as e:
get_reports_params(args=args)
assert str(e.value) == 'The given value for the argument type (report\'s format) is invalid. Valid value(s): pdf.'
def test_reports_command_invalid_limit(client):
"""
When fireeye-nx-get-reports command is provided with invalid value of limit it should give an error message.
"""
from FireEyeNX import get_reports_params
args = {
'report_type': 'IPS Top N Attackers Report',
'limit': 'dummy',
'end_time': '--'
}
with pytest.raises(ValueError) as e:
get_reports_params(args=args)
assert str(e.value) == 'The given value for limit is invalid. Expected integer value.'
def test_reports_command_missing_alert_argument():
"""
When fireeye-nx-get-reports command is provided with same value of
start_time and end_time it should give an error message.
"""
from FireEyeNX import get_reports_params
args = {
'report_type': ALERT_DETAILS_REPORT,
'type': 'pdf'
}
with pytest.raises(ValueError) as e:
get_reports_params(args=args)
assert str(e.value) == 'For fetching Alert Details Report, "infection_id" and ' \
'"infection_type" arguments are required.'
@pytest.mark.parametrize('args', [
{
'duration': '1_hour',
'end_time': '2020',
'mvx_correlated_only': 'dummy',
'start_time': '2020'
},
{
'duration': '1_hour',
'end_time': '2020',
'mvx_correlated_only': 'dummy',
'start_time': '2020'
}
])
def test_events_command_invalid_bool_value(args):
"""
When fireeye-nx-get-events command is provided with invalid bool value of an argument
it should give an error message.
"""
from FireEyeNX import get_events_params
with pytest.raises(ValueError) as e:
get_events_params(args=args)
assert str(e.value) == 'The given value for mvx_correlated_only argument is invalid. Valid values: true, false.'
def test_request_timeout_success():
"""
When provided valid request timeout then test should be passed.
"""
from FireEyeNX import get_request_timeout
request_timeout = '5'
request_timeout_int = get_request_timeout(request_timeout)
assert request_timeout_int == int(request_timeout)
@pytest.mark.parametrize('request_timeout', ['invalid_str_value', '-5', '0'])
def test_request_timeout_invalid_value(request_timeout):
"""
When provided invalid request timeout then display error message.
"""
from FireEyeNX import get_request_timeout
# Execute
with pytest.raises(ValueError) as e:
get_request_timeout(request_timeout)
# Assert
assert str(e.value) == 'HTTP(S) Request timeout parameter must be a positive integer.'
def test_request_timeout_large_value_failure():
"""
When too large value provided for request timeout then raised value error and
appropriate error message should display.
"""
from FireEyeNX import get_request_timeout
request_timeout = '990000000000000000'
# Execute
with pytest.raises(ValueError) as e:
get_request_timeout(request_timeout)
assert str(e.value) == 'Value is too large for HTTP(S) Request Timeout.'
@patch('FireEyeNX.BaseClient._http_request')
def test_http_request_when_response_type_is_json_return_type_should_match(mock_request, client):
"""
When http_request called and response type is json and content is '{}' passed
then response should match with {}.
"""
headers = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': CONTENT_TYPE_JSON
}
mock_request.return_value = MockResponse(status_code=200, content='{}', headers=headers)
mock_request.return_value.ok = True
resp = client.http_request(method='GET', url_suffix='')
assert resp == {}
def test_handle_error_response_when_status_code_not_in_list_then_raise_for_status():
"""
When handle_error_response method called and status is not in list then it must raise DemistoException.
"""
from FireEyeNX import Client
resp = MockResponse(content='{}', headers={}, status_code=200)
with pytest.raises(DemistoException):
Client.handle_error_response(resp)
def test_handle_error_response_when_content_type_zip(client):
    """
    handle_error_response must surface the raw body text as the error
    message when a non-2xx response carries Content-Type application/zip.

    (Removed a dead ``resp = {}`` assignment that was immediately
    overwritten in the original.)
    """
    from FireEyeNX import Client
    resp = mock_http_response(text='Could not fetch any artifact due to wrong uuid',
                              headers={'Content-Type': 'application/zip'}, status=403)
    with pytest.raises(DemistoException) as e:
        Client.handle_error_response(resp)
    assert str(e.value) == 'Could not fetch any artifact due to wrong uuid'
def test_handle_error_response_when_content_not_type_json_throw_value_error():
"""
When handle_error_response method called and json string have error then through ValueError and it passed
and again raise DemistoException.
"""
from FireEyeNX import Client
resp = MockResponse(content='{[]}', headers={}, status_code=400)
with pytest.raises(DemistoException) as e:
Client.handle_error_response(resp)
assert str(e.value) == 'An error occurred while fetching the data. '
def test_set_integration_context_api_token_empty_failure():
"""
When set_integration_context method called api token not there then must throw ValueError.
"""
from FireEyeNX import Client
resp = MockResponse(content='{}', headers={}, status_code=200)
with pytest.raises(ValueError) as e:
Client.set_integration_context(resp)
assert str(e.value) == 'No api token found. Please try again'
@patch('FireEyeNX.replace_alert_url_key_domain_to_instance_url')
@patch('FireEyeNX.Client.http_request')
def test_get_alerts_command_success(mock_request, replace_url, client):
"""
When fireeye-nx-get-alerts command is passed with valid arguments, it should be successful.
"""
from FireEyeNX import get_alerts_command
args = {
'src_ip': '0.0.0.0',
'dst_ip': '0.0.0.0',
'duration': '1_hour',
'start_time': '2017-06-21T16:30:00',
'file_name': 'file_name',
'file_type': 'file_type',
'info_level': 'extended',
'malware_name': 'malware_name',
'malware_type': 'domain_match',
'url': SAMPLE_URL
}
with open('TestData/get_alerts_response.json', encoding='utf-8') as f:
expected_res = json.load(f)
with open('TestData/get_alerts_context.json', encoding='utf-8') as f:
expected_outputs = json.load(f)
with open('TestData/get_alerts.md', encoding='utf-8') as f:
expected_hr = f.read()
mock_request.return_value = expected_res
replace_url.return_value = None
replace_alert_url = True
instance_url = SAMPLE_URL
cmd_result = get_alerts_command(client, args, replace_alert_url, instance_url)
assert cmd_result.raw_response == expected_res
assert cmd_result.outputs == expected_outputs
assert cmd_result.readable_output == expected_hr
@patch('FireEyeNX.Client.http_request')
def test_get_alerts_command_no_record_failure(mock_request, client):
"""
When fireeye-nx-get-alerts command called and passed with valid arguments but records are not present
then it must return error message.
"""
from FireEyeNX import get_alerts_command
args = {
'alert_id': '1',
'src_ip': '0.0.0.0',
'dst_ip': '0.0.0.0',
'duration': '1_hour',
'start_time': '2017-06-21T16:30:00',
'file_name': 'file_name',
'file_type': 'file_type',
'info_level': 'concise',
'malware_name': 'malware_name',
'malware_type': 'domain_match',
'url': SAMPLE_URL
}
mock_request.return_value = {}
replace_alert_url = False
instance_url = SAMPLE_URL
cmd_result = get_alerts_command(client, args, replace_alert_url, instance_url)
assert cmd_result == 'No alert(s) were found for the given argument(s).'
@patch('FireEyeNX.Client.http_request')
def test_get_events_command_no_record_failure(mock_request, client):
"""
When fireeye-nx-get-events command called passed with valid arguments but records are not present
then it must return error message.
"""
from FireEyeNX import get_events_command
args = {
'duration': '1_hour',
'end_time': '2017-06-21T16:30:00',
'mvx_correlated_only': 'true'
}
mock_request.return_value = {}
cmd_result = get_events_command(client, args=args)
assert cmd_result == 'No event(s) were found for the given argument(s).'
@patch('FireEyeNX.Client.http_request')
def test_get_artifacts_by_alert_command_zero_content_length_failure(mock_request, client):
"""
When fireeye-nx-get-artifacts-by-alert command called with Content-Length is zero
then it should return error message.
"""
from FireEyeNX import get_artifacts_by_alert_command
headers = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': CONTENT_TYPE_ZIP,
'Content-Length': 0
}
args = {
'uuid': 'abc-def'
}
mock_request.return_value = MockResponse(status_code=200, headers=headers, content='test')
cmd_result = get_artifacts_by_alert_command(client, args=args)
assert cmd_result == 'No artifacts data were found for the given argument(s).'
@patch('FireEyeNX.BaseClient._http_request')
def test_get_artifacts_by_alert_command_success(mock_request, client):
"""
When fireeye-nx-get-artifacts-by-alert command called and passed with valid arguments, it should be successful.
"""
from FireEyeNX import get_artifacts_by_alert_command
args = {'uuid': 'abc-def-ghI'}
with open('TestData/test-get-artifacts-by-alert.zip', encoding='IBM437') as f:
expected_res = f.read()
headers = {
'X-FeApi-Token': API_TOKEN,
'Content-Type': CONTENT_TYPE_ZIP,
'Content-Length': 56
}
mock_request.return_value = mock_http_response(status=200, headers=headers, content=expected_res)
result = get_artifacts_by_alert_command(client, args=args)
assert result.get('File', '') != ''
assert result.get('FileID', '') != ''
def test_is_supported_context_type_failure(client):
"""
When is_supported_context_type() method called invalid argument then should return False.
"""
assert client.is_supported_context_type('application/octet-stream') is False
def test_is_supported_context_type_success(client):
"""
When is_supported_context_type() method called valid argument then should return True.
"""
assert client.is_supported_context_type(CONTENT_TYPE_ZIP) is True
@patch('FireEyeNX.Client.http_request')
def test_get_events_command_success(mock_request, client):
"""
When fireeye-nx-get-events command executes successfully then context output and
response should match.
"""
from FireEyeNX import get_events_command
args = {
'duration': '48_hours',
'mvx_correlated_only': 'false',
'end_time': '2020-08-10T06:31:00.000+00:00'
}
with open('TestData/get_events_response.json') as f:
expected_res = json.load(f)
mock_request.return_value = expected_res
cmd_res = get_events_command(client, args)
with open('TestData/get_events_context.json', encoding='utf-8') as f:
expected_outputs = json.load(f)
with open('TestData/get_events.md') as f:
expected_hr = f.read()
assert cmd_res.raw_response == expected_res
assert cmd_res.outputs == expected_outputs
assert cmd_res.readable_output == expected_hr
def test_add_time_suffix_into_arguments(client):
"""
When add_time_suffix_into_arguments() method called it should add time suffix if format is suitable
else return as it is.
"""
from FireEyeNX import add_time_suffix_into_arguments
args = {
'start_time': '2020-05-20',
'end_time': '2020-05-20'
}
add_time_suffix_into_arguments(args)
actual_output = {'end_time': '2020-05-20T00:00:00.000-00:00',
'start_time': '2020-05-20T00:00:00.000-00:00'}
assert actual_output == args
def test_replace_alert_url_key_domain_to_instance_url():
"""
When replace_alert_url_key_domain_to_instance_url() method called it should Change domain
of 'alertUrl' to the instance URL.
"""
from FireEyeNX import replace_alert_url_key_domain_to_instance_url
alerts_resp = [
{
'alertUrl': 'https://WWW.fireeye-1234/event/evenid=123'
},
{
'alertUrl': 'http://www.fireeye-1234/event/evenid=124' # NOSONAR
}
]
instance_url = 'https://example.com'
actual_res = [
{
'alertUrl': 'https://example.com/event/evenid=123'
},
{
'alertUrl': 'https://example.com/event/evenid=124'
}
]
replace_alert_url_key_domain_to_instance_url(alerts_resp, instance_url)
assert actual_res == alerts_resp
@patch('FireEyeNX.replace_alert_url_key_domain_to_instance_url')
@patch('FireEyeNX.set_attachment_file')
@patch('FireEyeNX.BaseClient._http_request')
@patch('FireEyeNX.Client.get_api_token')
def test_fetch_incidents_for_alert_success(mock_api_token, mock_request, set_attachment, replace_url, client):
"""
When fetch_incidents() method called with fetch_type='Alerts' and pass all required arg it success.
"""
from FireEyeNX import fetch_incidents
# Configure
mock_last_run = {
'start_time': datetime.now().replace(tzinfo=timezone.utc).timestamp()
}
dummy_first_fetch = 1
mock_fetch_limit = 12
mock_malware_type = 'malware-type'
mock_api_token.return_value = API_TOKEN
with open('TestData/fetch_incidents_alert_response.json', 'r') as f:
dummy_response = f.read()
resp = Response()
resp._content = dummy_response.encode()
resp.status_code = 200
resp._ok = True
resp.headers = {
'Content-Type': CONTENT_TYPE_JSON
}
mock_request.return_value = resp
set_attachment.return_value = None
replace_url.return_value = None
# Execute
next_run, incidents = fetch_incidents(
client=client,
malware_type=mock_malware_type,
last_run=mock_last_run,
first_fetch=dummy_first_fetch,
fetch_limit=mock_fetch_limit,
fetch_type='Alerts',
mvx_correlated=False,
replace_alert_url=True,
instance_url=SAMPLE_URL,
fetch_artifacts=True,
is_test=False
)
# Assert
assert len(incidents) == mock_fetch_limit
assert next_run.get('start_time') is not None
@patch('FireEyeNX.Client.http_request')
def test_set_attachment_file(mock_request, client):
    """
    set_attachment_file(client, incident, uuid, headers) should populate the
    incident dict with attachment data when the artifact download succeeds.
    """
    from FireEyeNX import set_attachment_file
    uuid = 'abc'
    headers = {}
    resp = Response()
    # requests.Response derives ``ok`` from status_code, so setting a
    # private ``_ok`` flag (as the original did) had no effect; removed.
    resp.status_code = 200
    resp._content = b'a'
    resp.headers = {
        'Content-Length': '1'
    }
    mock_request.return_value = resp
    excepted_incident = {}
    set_attachment_file(client, excepted_incident, uuid, headers)
    assert excepted_incident != {}
@patch('FireEyeNX.BaseClient._http_request')
@patch('FireEyeNX.Client.get_api_token')
def test_fetch_incidents_for_event_success(mock_api_token, mock_request, client):
    """
    When fetch_incidents() is called with fetch_type='IPS Events' and all
    required arguments, it should return the fetched incidents and an
    updated start_time for the next run.
    """
    from FireEyeNX import fetch_incidents
    # Configure: last run anchored at "now" (UTC epoch seconds).
    mock_last_run = {
        'start_time': datetime.now().replace(tzinfo=timezone.utc).timestamp()
    }
    dummy_first_fetch = 1
    mock_fetch_limit = 1
    mock_api_token.return_value = API_TOKEN
    with open('TestData/fetch_incidents_event_response.json', 'r') as f:
        dummy_response = f.read()
    # Build a real requests.Response carrying the recorded event payload.
    resp = Response()
    resp._content = dummy_response.encode()
    resp.status_code = 200
    resp._ok = True  # NOTE(review): dead — requests derives ``ok`` from status_code
    resp.headers = {
        'Content-Type': CONTENT_TYPE_JSON
    }
    mock_request.return_value = resp
    next_run, incidents = fetch_incidents(
        client=client,
        malware_type=None,
        last_run=mock_last_run,
        first_fetch=dummy_first_fetch,
        fetch_limit=mock_fetch_limit,
        fetch_type='IPS Events',
        mvx_correlated=True,
        replace_alert_url=True,
        instance_url='',
        fetch_artifacts=False,
        is_test=False
    )
    # Assert
    assert len(incidents) == mock_fetch_limit
    assert next_run.get('start_time') is not None
| |
__author__ = 'davburge'
import ss_constants
import ss_inputs
import ss_math
import ss_validators
import Tkinter as tk
from Tkconstants import *
class Application(tk.Frame):
    def __init__(self, master=None):
        '''Main frame of the application.

        master -- parent Tk widget (typically the root window).
        '''
        tk.Frame.__init__(self, master)
        self.master = master
        # Fill the parent cell and stretch in every direction on resize.
        self.grid(column=0, row=0, sticky=(N, W, E, S))
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # Ensures that tk variables are setup before use
        ss_inputs.setup()
        # These two Widgets get deleted and reformed, start as None to prevent errors
        self.classWidget = None
        self.subskillWidget = None
        self.createWidgets()
def createWidgets(self):
'''Builds gui widgets'''
self.buildShipClass()
self.buildSkillDisplay()
self.buildStatDisplay()
self.buildAugDisplay()
self.buildModDisplay()
self.buildCalcButton()
self.buildQuitButton()
def buildShipClass(self):
'''Builds the ship class radiobuttons and edit ship button'''
shipClassLabel = tk.LabelFrame(self.master, text='Ship Type')
shipClassLabel.grid(column=0, row=0, rowspan=2, sticky=N+S, padx=2)
for key, value in ss_constants.ships.items():
if key != 'all':
radiobutton = tk.Radiobutton(shipClassLabel, text=value, variable=ss_inputs.shipClass, value=key)
if key == 'lfi':
i=0
elif key == 'hfi':
i=1
elif key == 'sfr':
i=2
elif key == 'ifr':
i=3
elif key == 'cap':
i=4
else:
i=-1
radiobutton.grid(column=0, row=i, sticky=W)
shipEditButton = tk.Button(shipClassLabel, text="Edit Ship Mods", command=self.buildShipEditDisplay)
shipEditButton.grid(column=0, row=5)
def buildShipEditDisplay(self):
'''Builds edit ship popup'''
if hasattr(self, 'shipEditWindow') and self.shipEditWindow is not None:
self.shipEditWindow.deiconify()
else:
self.shipEditWindow = tk.Toplevel(self)
self.shipEditWindow.resizable(0,0)
self.shipEditWindow.title("Ship")
self.shipEditWindow.protocol("WM_DELETE_WINDOW", self.shipEditWindow.withdraw)
inbuiltStatsLabel = tk.LabelFrame(self.shipEditWindow, text="Ship Inbuilt Stats")
inbuiltStatsLabel.grid(column=0, row=0, columnspan=2, padx=1, sticky=W+E)
vcmd_all = (self.shipEditWindow.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
i=0
for key, value in ss_inputs.shipMods.items():
# Uses a percent sign for all built inbuilt elec charge
if key == 'inbuiltElec':
symbol = '/sec'
vcmd = (self.shipEditWindow.register(ss_validators.shipStatValidate),
'%d', '%i', '%P', '%s', '%S')
else:
symbol = '%'
vcmd = vcmd_all
statLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames[key], padx=3)
statLabel.grid(column=0, row=i, sticky=E)
statEntry = tk.Entry(inbuiltStatsLabel, width=6, justify=RIGHT, textvariable=value,
validate='key', validatecommand=vcmd)
statEntry.grid(column=1, row=i)
percentLabel = tk.Label(inbuiltStatsLabel, text=symbol, padx=3)
percentLabel.grid(column=2, row=i)
i+=1
damageLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames['damageType'], padx=3)
damageLabel.grid(column=0, row=i, sticky=E)
damageMenubutton = tk.Menubutton(inbuiltStatsLabel, textvariable=ss_inputs.damageType, relief=RAISED)
damageMenubutton.grid(column=1, columnspan=2, row=i)
damageMenubutton.menu = tk.Menu(damageMenubutton, tearoff=0)
damageMenubutton['menu'] = damageMenubutton.menu
for key, value in ss_constants.elementTypes.items():
damageMenubutton.menu.add_radiobutton(label=key, variable=ss_inputs.damageType)
i+=1
resistLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames['resistType'], padx=3)
resistLabel.grid(column=0, row=i, sticky=E)
resistMenubutton = tk.Menubutton(inbuiltStatsLabel, textvariable=ss_inputs.resistType, relief=RAISED)
resistMenubutton.grid(column=1, columnspan=2, row=i)
resistMenubutton.menu = tk.Menu(resistMenubutton, tearoff=0)
resistMenubutton['menu'] = resistMenubutton.menu
for key, item in ss_constants.elementTypes.items():
resistMenubutton.menu.add_radiobutton(label=key, variable=ss_inputs.resistType)
i+=1
helpButton = tk.Button(self.shipEditWindow, text="Help", command=self.shipHelp)
helpButton.grid(column=0, row=i)
updateButton = tk.Button(self.shipEditWindow, text="Update and Close", command=self.shipEditWindow.withdraw)
updateButton.grid(column=1, row=i, pady=2)
def shipHelp(self):
'''Builds help popup for ship edit popup'''
if hasattr(self, 'helpWindow') and self.helpWindow is not None:
self.helpWindow.deiconify()
else:
self.helpWindow = tk.Toplevel(self)
self.helpWindow.resizable(0,0)
self.helpWindow.title("Help")
self.helpWindow.protocol("WM_DELETE_WINDOW", self.helpWindow.withdraw)
helpMessage = tk.Message(self.helpWindow, text=ss_constants.shipHelp, justify=CENTER)
helpMessage.grid(column=0, row=0)
def buildSkillDisplay(self):
'''Builds the skill display section'''
self.skillDisplayLabel = tk.LabelFrame(self.master, text='Skills')
self.skillDisplayLabel.grid(column=1, row=0, columnspan=3, rowspan=2, sticky=N, padx=2)
self.buildFocusSkill()
self.setDefaultClass()
self.buildSubskills()
self.buildMiscSkills()
def buildFocusSkill(self):
'''Builds the focus skill radiobuttons and level entry'''
focusLabel = tk.LabelFrame(self.skillDisplayLabel, text='Focus Skill')
focusLabel.grid(column=0, row=0, sticky=N+S)
for key, value in ss_constants.skill_tree.items():
radiobutton = tk.Radiobutton(focusLabel, text=value['name'], variable=ss_inputs.focusSkill,
value=key, command=self.setDefaultClass)
if key == 'combat_focus':
i=0
elif key =='recon_focus':
i=1
elif key == 'support_focus':
i=2
elif key == 'fleet_focus':
i=3
else: # Unused
i=-1
radiobutton.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(focusLabel, text='Level (0-22):')
level = tk.Spinbox(focusLabel, from_=0, to=22, width=3, textvariable=ss_inputs.focusLevel)
label.grid(column=0, row=4, sticky=W)
level.grid(column=1, row=4, sticky=E, padx=4)
def buildClassSkill(self):
'''Builds the class skill radiobuttons and level entry'''
if self.classWidget is not None:
# Removes widget entirely to reset proper skill selection
self.classWidget.grid_remove()
classLabel = tk.LabelFrame(self.skillDisplayLabel, text='Class Skill')
classLabel.grid(column=1, row=0, columnspan=3, sticky=N+S)
for key, value in ss_constants.skill_tree[ss_inputs.focusSkill.get()].items():
if key != 'name':
radiobutton = tk.Radiobutton(classLabel, text=value['name'], variable=ss_inputs.classSkill,
value=key, command=self.buildSubskills)
# Chooses index based upon typical in-game listing of which skills are first
if (key == 'berserker' or key == 'speed_demon'
or key == 'shield_monkey' or key == 'fleet_commander'):
i=0
else:
i=1
radiobutton.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(classLabel, text='Level (0-22):')
level = tk.Spinbox(classLabel, from_=0, to=22, width=3, textvariable=ss_inputs.classLevel)
label.grid(column=0, row=2, sticky=W)
level.grid(column=1, row=2, sticky=E, padx=4)
self.classWidget = classLabel
# Runs buildSubskills() to reset subskills radiobuttons based upon new chosen first index
self.buildSubskills()
def buildSubskills(self):
'''Builds the subskill radiobuttons and level entries'''
if self.subskillWidget is not None:
# Removes widget entirely to reset proper skill selection
self.subskillWidget.grid_remove()
subskillLabel = tk.LabelFrame(self.skillDisplayLabel, text='Subskills')
subskillLabel.grid(column=4, row=0, sticky=NSEW)
i=0
for key, value in ss_constants.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()].items():
if key != 'name':
skill = None
skill_level = None
# Uses subSkill_#/subSkill_#Level to reduce variable usage
if i==0:
ss_inputs.subSkill_1.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_1Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
elif i==2:
ss_inputs.subSkill_2.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_2Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
elif i==4:
ss_inputs.subSkill_3.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_3Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
i+=2
self.subskillWidget = subskillLabel
def buildMiscSkills(self):
'''Builds aug tweak and imperial tweak inputs'''
augTweakLabel = tk.Label(self.skillDisplayLabel, text="Aug Tweaking (0-25):")
augTweakLabel.grid(column=0, row=1, sticky=E)
augTweaklevel = tk.Spinbox(self.skillDisplayLabel, from_=0, to=25, width=3,
textvariable=ss_inputs.augTweakLevel)
augTweaklevel.grid(column=1, row=1, sticky=W)
impTweakLabel = tk.Label(self.skillDisplayLabel, text="Imperial Tweaking (0-5):")
impTweakLabel.grid(column=2, row=1, columnspan=3, padx=40, sticky=E)
impTweaklevel = tk.Spinbox(self.skillDisplayLabel, from_=0, to=10, width=3,
textvariable=ss_inputs.impTweakLevel)
impTweaklevel.grid(column=4, row=1, padx=6, sticky=E)
def setDefaultClass(self):
'''Called by choosing a focus skill, resets proper pre-selected class'''
if (ss_inputs.focusSkill.get() == 'combat_focus'):
ss_inputs.classSkill.set('berserker')
elif (ss_inputs.focusSkill.get() == 'recon_focus'):
ss_inputs.classSkill.set('speed_demon')
elif (ss_inputs.focusSkill.get() == 'support_focus'):
ss_inputs.classSkill.set('shield_monkey')
elif (ss_inputs.focusSkill.get() == 'fleet_focus'):
ss_inputs.classSkill.set('fleet_commander')
self.buildClassSkill()
def buildStatDisplay(self):
'''Builds stat inputs, bonus displays, and total displays'''
displayLabel = tk.LabelFrame(self.master, text='Input and Output')
displayLabel.grid(column=0, row=2, columnspan=2, sticky=W+E, padx=2)
self.buildBaseInput(displayLabel)
self.buildBonusAmount(displayLabel)
self.buildFinalDisplay(displayLabel)
def buildBaseInput(self, master):
'''Builds stat inputs with validator'''
label = tk.Label(master, text='Base Input', padx=20)
baseInputLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
baseInputLabel.grid(column=0, row=0, sticky=W+E)
vcmd_all = (master.register(ss_validators.shipStatValidate),
'%d', '%i', '%P', '%s', '%S')
i=0
for key, value in ss_inputs.baseInputs.items():
statName = tk.Label(baseInputLabel, text=ss_constants.statNames[key])
statName.grid(column=0, row=i, sticky=W)
if key == 'shieldCharge' or key == 'energyCharge' or key == 'RoF' or key == 'resist':
width = 5
vcmd = (master.register(ss_validators.shipDecimalValidate),
'%d', '%i', '%P', '%s', '%S')
if key == 'RoF':
unitText = 's'
elif key == 'shieldCharge':
unitText = '/s'
elif key == 'energyCharge':
unitText = '/1.2s'
elif key == 'resist':
unitText = '%'
vcmd = (master.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
else:
unitText = 'err'
unit = tk.Label(baseInputLabel, text=unitText)
unit.grid(column=2, row=i, sticky=W)
else:
width = 10
vcmd = vcmd_all
amount = tk.Entry(baseInputLabel, width=width, justify=RIGHT, textvariable=value['initial'],
validate='key', validatecommand=vcmd)
# Width/5 so that larger entries span 2 columns
amount.grid(column=1, row=i, columnspan=(width/5), sticky=W, padx=2)
if key == 'elecTemp':
statName.configure(text="Firing Energy:")
statName.update()
i+=1
def buildBonusAmount(self, master):
'''Builds the display for bonus amount'''
label = tk.Label(master, text='Bonus', padx=10)
bonusAmountLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
bonusAmountLabel.grid(column=1, row=0, sticky=NW+E)
i=0
for key, value in ss_inputs.baseInputs.items():
mathSign = u"\u2715" # is the multiplication sign
if key == 'RoF':
mathSign = u"\u00F7" # is the division sign
symbol = tk.Label(bonusAmountLabel, text=mathSign)
amount = tk.Entry(bonusAmountLabel, width=4, state="readonly",
justify=RIGHT, textvariable=value['bonus'])
symbol.grid(column=0, row=i, sticky=W+E)
amount.grid(column=1, row=i, sticky=E, padx=6)
i+=1
def buildFinalDisplay(self, master):
'''Builds the display for the total amount'''
label = tk.Label(master, text='Overall', padx=15)
finalLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
finalLabel.grid(column=2, row=0, sticky=W+E)
i=0
for key, value in ss_inputs.baseInputs.items():
equalsSign = tk.Label(finalLabel, text="=")
equalsSign.grid(column=0, row=i, sticky=W)
width = 10
if key == 'shieldCharge' or key == 'energyCharge' or key == 'RoF':
width = 5
amount = tk.Entry(finalLabel, width=width, justify=RIGHT, state="readonly", textvariable=value['overall'])
# Width/5 so that larger entries span 2 columns
amount.grid(column=1, row=i, columnspan=(width/5), sticky=W, padx=2)
if width == 5:
if key == 'shieldCharge':
unitText = '/s'
elif key == 'energyCharge':
unitText = '/1.2s'
elif key == 'RoF':
unitText = 's'
else:
unitText = 'err'
unit = tk.Label(finalLabel, text=unitText)
unit.grid(column=2, row=i, sticky=W)
i+=1
def buildAugDisplay(self):
'''Builds aug display section'''
self.augDisplayLabel = tk.LabelFrame(self.master, text='Augmenters')
self.augDisplayLabel.grid(column=2, row=2, columnspan=2, rowspan=3, sticky=NE+W, padx=2)
self.buildAugConfig()
def buildAugConfig(self):
'''Builds aug number change input and freeze box'''
self.augLabelList = []
self.augButtonList = []
self.augClearButtonList = []
label = tk.Label(self.augDisplayLabel, text='Number:')
label.grid(column=0, row=0, padx=4, sticky=E)
self.augNumber = tk.Spinbox(self.augDisplayLabel, from_=0, to=6, width=2, state="readonly",
textvariable=ss_inputs.augNumber, command=self.changeAugAmount)
self.augNumber.grid(column=1, row=0, sticky=E)
self.freezeAugsCheck = tk.IntVar()
checkbutton = tk.Checkbutton(self.augDisplayLabel, text="Freeze",
variable=self.freezeAugsCheck, command=self.freezeAugs)
checkbutton.grid(column=2, row=0, columnspan=2, sticky=E)
def changeAugAmount(self):
'''Adds or removes a set of aug edit/reset buttons'''
if int(ss_inputs.augNumber.get()) > len(self.augButtonList):
numberToAdd = int(ss_inputs.augNumber.get())-(len(self.augButtonList))
for i in range(numberToAdd):
position = len(self.augButtonList) + 1
name = "Aug " + str(position) + ":"
augLabel = tk.Label(self.augDisplayLabel, width=15, anchor=W, text=name)
augLabel.grid(column=0, row=position, columnspan=3, padx=4, sticky=W)
self.augLabelList.append(augLabel)
editButton = tk.Button(self.augDisplayLabel, text="Edit",
command=lambda: self.editAug(position))
editButton.grid(column=3, row=position, sticky=E)
self.augButtonList.append(editButton)
resetButton = tk.Button(self.augDisplayLabel, text="Reset",
command=lambda: self.resetAug(position))
resetButton.grid(column=4, row=position, padx=4, sticky=W)
self.augClearButtonList.append(resetButton)
ss_inputs.augs.append(ss_inputs.augmenter())
else:
for i in range(len(self.augButtonList)-(int(ss_inputs.augNumber.get()))):
toRemove = self.augLabelList.pop()
toRemove.grid_remove()
toRemove = self.augButtonList.pop()
toRemove.grid_remove()
toRemove = self.augClearButtonList.pop()
toRemove.grid_remove()
toRemove = None
ss_inputs.augs.pop()
def editAug(self, augNum):
'''Edit augmenter popup'''
augNum = augNum - 1
if not hasattr(self, 'augWindows'):
self.augWindows = []
if len(self.augWindows) > augNum and self.augWindows[augNum] is not None:
self.augWindows[augNum].deiconify()
else:
popup = augToplevel(master=self, augNum=augNum)
popup.resizable(0,0)
self.augWindows.insert(augNum, popup)
title = ss_inputs.augs[augNum]['name'].get()
if title == "":
title = "Aug " + str(augNum + 1)
popup.title(title)
statLabel = tk.Label(popup, text=ss_constants.statNames['name'], padx=3)
statLabel.grid(column=0, row=0, sticky=W+E)
statEntry = tk.Entry(popup, width=15, justify=RIGHT, textvariable=ss_inputs.augs[augNum].get('name'))
statEntry.grid(column=1, row=0, columnspan=3)
augFrame= tk.LabelFrame(popup, text="Augmenter Stats")
augFrame.grid(column=0, row=1, columnspan=4, padx=1, sticky=W+E)
vcmd = (popup.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
# vcmd = (popup.register(self.bonusStatValidate),
# '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
# %v = validation_type, %V = trigger_type, %W = widget_name
i=1
for key, value in ss_inputs.augs[augNum].items():
if key != 'enabled' and key != 'name':
statLabel = tk.Label(augFrame, text=ss_constants.statNames[key], padx=3)
statLabel.grid(column=0, row=i, sticky=E)
statEntry = tk.Entry(augFrame, width=6, justify=RIGHT, textvariable=value,
validate='key', validatecommand=vcmd)
statEntry.grid(column=1, row=i)
percentLabel = tk.Label(augFrame, text="%", padx=3)
percentLabel.grid(column=2, row=i)
i+=1
resetButton = tk.Button(popup, text="Reset", command=lambda: self.resetAug(augNum))
resetButton.grid(column=0, row=2)
updateButton = tk.Button(popup, text="Update and Close", command=lambda: popup.destroy())
updateButton.grid(column=1, row=2, columnspan=3)
def updateAug(self, augNum):
'''Update aug name in main frame'''
name = ss_inputs.augs[augNum]['name'].get()
if name == "":
name = "Aug " + str(augNum + 1) + ":"
self.augLabelList[augNum].configure(text=name)
self.augLabelList[augNum].update()
def resetAug(self, augNum):
'''Reset all values to null for the aug required'''
for key, value in ss_inputs.augs[augNum-1].items():
if key != 'enabled':
value.set("")
self.augLabelList[augNum-1].configure(text="Aug " + str(augNum) + ":")
self.augLabelList[augNum-1].update()
def freezeAugs(self):
'''Toggles the freezing of the amount of augs available and the input being disabled'''
if self.freezeAugsCheck.get(): # frozen
self.augNumber.configure(state=DISABLED)
self.augNumber.update()
else: # unfrozen
self.augNumber.configure(state="readonly")
self.augNumber.update()
def buildModDisplay(self):
'''Builds the buttons for item mods and auras'''
modDisplayLabel = tk.LabelFrame(self.master, text='Misc. Mods')
modDisplayLabel.grid(column=2, row=1, columnspan=3, rowspan=3, sticky=SW+SE)
button = tk.Button(modDisplayLabel, text="Edit Item Mods", command=self.buildItemModInput)
button.grid(column=0, row=0, sticky=E, padx=1, pady=1)
button = tk.Button(modDisplayLabel, text="Edit Auras", command=self.buildAuraInput)
button.grid(column=1, row=0, sticky=E, padx=1, pady=1)
def buildItemModInput(self):
'''Builds item mod input popup'''
itemModPopup = tk.Toplevel(master=self)
itemModPopup.resizable(0,0)
itemModPopup.title("Item Mods")
addModButton = tk.Button(itemModPopup, text="Add Item Mod", command=self.addItemMod)
addModButton.grid(column=0, row=0, pady=2, padx=2)
resetAllButton = tk.Button(itemModPopup, text="Reset and Remove All Mods", command=self.resetItemMods)
resetAllButton.grid(column=1, row=0, pady=2, padx=2)
self.modDisplayFrame = tk.LabelFrame(itemModPopup, text="Mods:")
self.modDisplayFrame.grid(column=0, row=1, columnspan=2, padx=2, sticky=NSEW)
message = tk.Message(self.modDisplayFrame, name='helpMessage', text='Section Disabled')
message.grid(column=0, row=0)
def addItemMod(self):
for name, widget in self.modDisplayFrame.children.items():
if name == 'helpMessage':
if widget.grid_info():
widget.grid_remove()
editModPopup = itemModToplevel(master=self)
editModPopup.resizable(0,0)
editModPopup.title("This function is disabled")
def resetItemMods(self):
for name, widget in self.modDisplayFrame.children.items():
if name == 'helpMessage':
widget.grid(column=0, row=0)
pass
def updateMods(self):
print "test"
def buildAuraInput(self):
'''Builds aura input popup'''
popup = tk.Toplevel(master=self)
popup.resizable(0,0)
popup.title("This function is disabled")
message = tk.Message(master=popup, text="(July 11, 2014): This is disabled until I can make the gui for it")
message.pack()
def buildCalcButton(self):
'''Builds the calculate button'''
calcButton = tk.Button(self.master, text='Calculate', command=ss_math.calculate)
calcButton.grid(column=0, row=4, sticky=NE, pady=2)
def buildQuitButton(self):
'''Builds the quit button'''
quitButton = tk.Button(self.master, text='Quit', command=self.quit)
quitButton.grid(column=1, row=4, sticky=NE, pady=2)
def destroy(self):
'''Overrides destroy due to possible exception, forces a hard quit'''
try:
tk.Frame.destroy(self)
except tk.TclError or TypeError:
pass
class augToplevel(tk.Toplevel):
    '''Popup window for editing one augmenter.

    "Destroying" the window instead pushes the aug's current name back to the
    owning Application frame and merely hides the window, so its widget state
    survives and the popup can be deiconified later.
    '''
    def __init__(self, master, augNum=None, *args, **kwargs):
        tk.Toplevel.__init__(self, master=master)
        # Remember which aug this popup edits so destroy() can report back.
        self.augNum = augNum
        self.master = master
    def destroy(self):
        # Sync the aug label on the main frame, then withdraw (not destroy).
        self.master.updateAug(self.augNum)
        tk.Toplevel.withdraw(self)
class itemModToplevel(tk.Toplevel):
    '''Extension of Tkinter.Toplevel to allow for updating the itemMod list on destroy of the popup'''
    def __init__(self, master, *args, **kwargs):  # fixed typo: was **kwards
        tk.Toplevel.__init__(self, master=master)
        self.master = master
    def destroy(self):
        # Notify the owning frame, then hide rather than destroy so the
        # window object stays reusable (mirrors augToplevel's behaviour).
        self.master.updateMods()
        tk.Toplevel.withdraw(self)
| |
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_rnn import TextRNN
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
# Run-time hyper-parameters / file paths for the whole script.
args = parser.parameter_parser()
# Run mode flag: 'T' trains from scratch, 'R' restores a checkpoint
# (see the OPTION checks inside train_rnn). Value supplied by dh._option.
OPTION = dh._option(pattern=0)
# One log file per run, named after the mode and the wall-clock start time.
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
    """Pair up the parallel lists in *data* into training triples.

    Returns an iterator of (front_seq, behind_seq, onehot_label) tuples drawn
    in lockstep from data['f_pad_seqs'], data['b_pad_seqs'] and
    data['onehot_labels'].
    """
    fronts = data['f_pad_seqs']
    behinds = data['b_pad_seqs']
    labels = data['onehot_labels']
    return zip(fronts, behinds, labels)
def train_rnn():
    """Training RNN model.

    Builds the TF1 graph for TextRNN, then either trains from scratch
    (OPTION == 'T') or restores the latest checkpoint and continues
    (OPTION == 'R'). Logs train/validation summaries and keeps both periodic
    checkpoints and the best-by-accuracy checkpoints.
    """
    # Print parameters used for the model
    dh.tab_printer(args, logger)
    # Load word2vec model
    word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
    # Load sentences, labels, and training parameters
    logger.info("Loading data...")
    logger.info("Data processing...")
    train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
    val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
    # Build a graph and rnn object
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=args.allow_soft_placement,
            log_device_placement=args.log_device_placement)
        session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            rnn = TextRNN(
                sequence_length=args.pad_seq_len,
                vocab_size=len(word2idx),
                embedding_type=args.embedding_type,
                embedding_size=args.embedding_dim,
                lstm_hidden_size=args.lstm_dim,
                fc_hidden_size=args.fc_dim,
                num_classes=args.num_classes,
                l2_reg_lambda=args.l2_lambda,
                pretrained_embedding=embedding_matrix)
            # Define training procedure
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                # Staircase exponential LR decay + Adam, with global-norm
                # gradient clipping to guard against exploding gradients.
                learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
                                                           global_step=rnn.global_step,
                                                           decay_steps=args.decay_steps,
                                                           decay_rate=args.decay_rate,
                                                           staircase=True)
                optimizer = tf.train.AdamOptimizer(learning_rate)
                grads, vars = zip(*optimizer.compute_gradients(rnn.loss))
                grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
                train_op = optimizer.apply_gradients(zip(grads, vars), global_step=rnn.global_step, name="train_op")
            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            for g, v in zip(grads, vars):
                if g is not None:
                    grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)
            # Output directory for models and summaries
            out_dir = dh.get_out_dir(OPTION, logger)
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
            # Summaries for loss
            loss_summary = tf.summary.scalar("loss", rnn.loss)
            # Train summaries
            train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
            # Validation summaries
            validation_summary_op = tf.summary.merge([loss_summary])
            validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
            validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
            # Rolling checkpoints plus a separate "best by accuracy" saver.
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
            best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
            if OPTION == 'R':
                # Load rnn model
                logger.info("Loading model...")
                checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
                logger.info(checkpoint_file)
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)
            if OPTION == 'T':
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                sess.run(tf.global_variables_initializer())
                sess.run(tf.local_variables_initializer())
                # Embedding visualization config
                config = projector.ProjectorConfig()
                embedding_conf = config.embeddings.add()
                embedding_conf.tensor_name = "embedding"
                embedding_conf.metadata_path = args.metadata_file
                projector.visualize_embeddings(train_summary_writer, config)
                projector.visualize_embeddings(validation_summary_writer, config)
                # Save the embedding visualization
                saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
            current_step = sess.run(rnn.global_step)

            def train_step(batch_data):
                """A single training step: run train_op and log the loss."""
                x_f, x_b, y_onehot = zip(*batch_data)
                feed_dict = {
                    rnn.input_x_front: x_f,
                    rnn.input_x_behind: x_b,
                    rnn.input_y: y_onehot,
                    rnn.dropout_keep_prob: args.dropout_rate,
                    rnn.is_training: True
                }
                _, step, summaries, loss = sess.run(
                    [train_op, rnn.global_step, train_summary_op, rnn.loss], feed_dict)
                logger.info("step {0}: loss {1:g}".format(step, loss))
                train_summary_writer.add_summary(summaries, step)

            def validation_step(val_loader, writer=None):
                """Evaluates model on a validation set.

                Returns (loss, accuracy, precision, recall, F1, AUC); dropout
                is disabled (keep_prob=1.0, is_training=False).
                """
                batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
                eval_counter, eval_loss = 0, 0.0
                true_labels = []
                predicted_scores = []
                predicted_labels = []
                for batch_validation in batches_validation:
                    x_f, x_b, y_onehot = zip(*batch_validation)
                    feed_dict = {
                        rnn.input_x_front: x_f,
                        rnn.input_x_behind: x_b,
                        rnn.input_y: y_onehot,
                        rnn.dropout_keep_prob: 1.0,
                        rnn.is_training: False
                    }
                    step, summaries, predictions, cur_loss = sess.run(
                        [rnn.global_step, validation_summary_op, rnn.topKPreds, rnn.loss], feed_dict)
                    # Prepare for calculating metrics
                    # NOTE(review): rnn.topKPreds appears to be a
                    # (scores, indices) pair; [0] takes the top-1 entry of
                    # each — confirm against the TextRNN definition.
                    for i in y_onehot:
                        true_labels.append(np.argmax(i))
                    for j in predictions[0]:
                        predicted_scores.append(j[0])
                    for k in predictions[1]:
                        predicted_labels.append(k[0])
                    eval_loss = eval_loss + cur_loss
                    eval_counter = eval_counter + 1
                    if writer:
                        writer.add_summary(summaries, step)
                # Mean batch loss over the validation pass.
                eval_loss = float(eval_loss / eval_counter)
                # Calculate Precision & Recall & F1
                eval_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
                eval_pre = precision_score(y_true=np.array(true_labels),
                                           y_pred=np.array(predicted_labels), average='micro')
                eval_rec = recall_score(y_true=np.array(true_labels),
                                        y_pred=np.array(predicted_labels), average='micro')
                eval_F1 = f1_score(y_true=np.array(true_labels),
                                   y_pred=np.array(predicted_labels), average='micro')
                # Calculate the average AUC
                eval_auc = roc_auc_score(y_true=np.array(true_labels),
                                         y_score=np.array(predicted_scores), average='micro')
                return eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc

            # Generate batches
            batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)
            num_batches_per_epoch = int((len(train_data['f_pad_seqs']) - 1) / args.batch_size) + 1
            # Training loop. For each batch...
            for batch_train in batches_train:
                train_step(batch_train)
                current_step = tf.train.global_step(sess, rnn.global_step)
                if current_step % args.evaluate_steps == 0:
                    logger.info("\nEvaluation:")
                    eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc = \
                        validation_step(val_data, writer=validation_summary_writer)
                    logger.info("All Validation set: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
                                "Recall {3:g} | F1 {4:g} | AUC {5:g}"
                                .format(eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc))
                    # Keeps the 3 best checkpoints ranked by validation accuracy.
                    best_saver.handle(eval_acc, sess, current_step)
                if current_step % args.checkpoint_steps == 0:
                    checkpoint_prefix = os.path.join(checkpoint_dir, "model")
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    logger.info("Saved model checkpoint to {0}\n".format(path))
                if current_step % num_batches_per_epoch == 0:
                    current_epoch = current_step // num_batches_per_epoch
                    logger.info("Epoch {0} has finished!".format(current_epoch))
    logger.info("All Done.")
if __name__ == '__main__':
    # Script entry point: train or restore per the module-level OPTION flag.
    train_rnn()
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first UTXO in *listunspent* whose amount equals *amount*.

    Raises AssertionError when no entry matches.
    """
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def set_test_params(self):
        """Framework configuration: four nodes starting from an empty chain."""
        self.num_nodes = 4
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        """Skip the whole test when the build has no wallet support."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Start the nodes and wire a custom topology.

        Overrides the default network setup: edges are 0-1, 1-2, 0-2 and 0-3,
        so node 3 is only reachable through node 0.
        """
        self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']*100
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid litecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.stop_nodes()
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Test entry point: the framework's main() handles setup, run_test and teardown.
if __name__ == '__main__':
    RawTransactionsTest().main()
| |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains a dialog and widgets related to the module annotation
displaying a list of all pairs (key,value) for a module
QKeyValueDelegate
QModuleAnnotation
QModuleAnnotationTable
"""
from PyQt4 import QtCore, QtGui
from vistrails.core import debug
################################################################################
class QModuleAnnotation(QtGui.QDialog):
    """
    QModuleAnnotation is a modal dialog for annotating modules. It hosts
    a QModuleAnnotationTable inside a scroll area, with a Close button
    (also bound to Esc) underneath.
    """
    def __init__(self, module, controller, parent=None):
        """
        QModuleAnnotation(module: Module, controller: VistrailController,
                          parent: QWidget) -> None
        Build the dialog widgets and wire the Close button.
        """
        QtGui.QDialog.__init__(self, parent)
        self.module = module
        self.controller = controller
        self.setModal(True)
        self.setWindowTitle('Module Annotations')
        self.setLayout(QtGui.QVBoxLayout())
        self.layout().setMargin(0)
        self.layout().setSpacing(0)
        # Scroll area keeps long annotation lists usable
        self.scrollArea = QtGui.QScrollArea(self)
        self.layout().addWidget(self.scrollArea)
        self.scrollArea.setFrameStyle(QtGui.QFrame.NoFrame)
        self.annotationTable = QModuleAnnotationTable(self.module,
                                                      self.controller,
                                                      self)
        self.scrollArea.setWidget(self.annotationTable)
        self.scrollArea.setWidgetResizable(True)
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setMargin(5)
        self.closeButton = QtGui.QPushButton('Close', self)
        self.closeButton.setFixedWidth(100)
        self.buttonLayout.addWidget(self.closeButton)
        # Esc triggers the Close button as well
        self.closeButton.setShortcut('Esc')
        self.layout().addLayout(self.buttonLayout)
        self.connect(self.closeButton, QtCore.SIGNAL('clicked(bool)'), self.close)
class QModuleAnnotationTable(QtGui.QTableWidget):
    """
    QModuleAnnotationTable is a table widget that can be dock inside a
    window. It has two columns for key and value pairs to view/edit at
    run-time.  A trailing blank row is always kept for entering a new
    annotation; edits are committed through QKeyValueDelegate.
    """
    def __init__(self, module=None, controller=None, parent=None):
        """ QModuleAnnotationTable(module: Module, controller:
        VistrailController, parent: QWidget) -> QModuleAnnotationTable
        Construct the 1x2 table
        """
        QtGui.QTableWidget.__init__(self, 1, 2, parent)
        self.read_only = False
        self.setHorizontalHeaderLabels(['Key', 'Value'])
        self.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
        self.horizontalHeader().setMovable(False)
        self.horizontalHeader().setStretchLastSection(True)
        self.setSortingEnabled(True)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.verticalHeader().hide()
        # All cell edits are routed through the key/value delegate, which
        # syncs changes back to the vistrail controller.
        self.delegate = QKeyValueDelegate(self)
        self.setItemDelegate(self.delegate)
        self.module = module
        self.controller = controller
        self.updateLocked = False
        # NOTE(review): updateModule() is called with no argument, so its
        # default module=None overwrites the module passed to this
        # constructor.  Callers presumably issue a later
        # updateModule(module) -- confirm before changing.
        self.updateModule()
    def set_controller(self, controller):
        # Late-bind the controller (e.g. when the table is docked and the
        # active vistrail changes).
        self.controller = controller
    def setReadOnly(self, read_only):
        # Toggle editability; the widget is disabled entirely when
        # read-only or when no module is set.
        if read_only != self.read_only:
            self.read_only = read_only
            self.setEnabled(not read_only and self.module is not None)
    def updateModule(self, module=None):
        """ updateModule() -> None
        Update the widget to view the module annotations
        """
        self.module = module
        self.setSortingEnabled(False)
        # NOTE(review): returning here leaves sorting disabled; presumably
        # fine because locked updates are transient (lockUpdate/
        # unlockUpdate around controller calls) -- confirm.
        if self.updateLocked: return
        self.clearContents()
        self.setRowCount(0)
        if self.module:
            # One extra blank row at the bottom for entering a new pair.
            self.setRowCount(len(self.module.annotations)+1)
            curRow = 0
            for annotation in self.module.annotations:
                if annotation.key == '__desc__':
                    # We don't display the '__desc__' annotation in the list
                    # anymore. If it's present we decrease the rowcount by 1
                    self.setRowCount(len(self.module.annotations))
                else:
                    self.setItem(curRow, 0, QtGui.QTableWidgetItem(annotation.key))
                    item = QtGui.QTableWidgetItem(annotation.value)
                    self.setItem(curRow, 1, item)
                    curRow += 1
            self.setEnabled(not self.read_only)
        else:
            self.setRowCount(1)
            self.setEnabled(False)
        # Blank, editable cells for the trailing "new annotation" row.
        self.setItem(self.rowCount()-1, 0, QtGui.QTableWidgetItem(''))
        self.setItem(self.rowCount()-1, 1, QtGui.QTableWidgetItem(''))
        self.setSortingEnabled(True)
    def makeItemBold(self, index):
        """ makeItemBold(index: QModelIndex) -> None
        Make the item at index to have a bold face
        """
        oldFont = QtGui.QFont(self.model().data(index, QtCore.Qt.FontRole))
        oldFont.setBold(True)
        oldFont.setPointSize(20)
        self.model().setData(index, oldFont, QtCore.Qt.FontRole)
    def lockUpdate(self):
        """ lockUpdate() -> None
        Do not allow updateModule()
        """
        self.updateLocked = True
    def unlockUpdate(self):
        """ unlockUpdate() -> None
        Allow updateModule()
        """
        self.updateLocked = False
    def addRow(self):
        """ addRow() -> None
        Adds a new empty row to the table
        """
        self.setSortingEnabled(False)
        self.resizeRowsToContents()
        self.insertRow(self.rowCount())
        self.setItem(self.rowCount()-1, 0,
                     QtGui.QTableWidgetItem(''))
        self.setItem(self.rowCount()-1, 1,
                     QtGui.QTableWidgetItem(''))
        # NOTE(review): sorting is set to False again here rather than
        # re-enabled; looks like it was meant to be True -- confirm.
        self.setSortingEnabled(False)
    def editNextAvailableCell(self):
        # Begin editing the key cell of the trailing blank row.
        item = self.item(self.rowCount()-1, 0)
        self.editItem(item)
class QKeyValueDelegate(QtGui.QItemDelegate):
    """
    QKeyValueDelegate tries to create a special control widget
    providing a simple interface for adding/deleting module
    annotations.  setModelData is where edited cells are validated and
    committed to the vistrail controller.
    """
    def __init__(self, table):
        """ QKeyValueDelegate(table: QModuleAnnotationTable) -> QKeyValueDelegate
        Save a reference to table and perform a default initialization
        """
        self.table = table
        QtGui.QItemDelegate.__init__(self, None)
    def setEditorData(self, editor, index):
        """ setEditorData(editor: QWidget, index: QModelIndex) -> None
        Set the current item (at index) data into editor for editting
        """
        text = index.data(QtCore.Qt.DisplayRole)
        editor.setText(text)
    def setModelData(self, editor, model, index):
        """ setModelData(editor: QWidget, model: QAbstractItemModel,
        index: QModelIndex) -> None
        Assign the value of the editor back into the model and emit a
        signal to update vistrail
        """
        text = str(editor.text())
        row = index.row()
        col = index.column()
        # Current key/value in this row ('' when the cell is unset).
        keyItem = self.table.item(row, 0)
        if keyItem:
            key = str(keyItem.text())
        else:
            key = ''
        valueItem = self.table.item(row, 1)
        if valueItem:
            value = str(valueItem.text())
        else:
            value = ''
        if col==0:
            # Clearing a key on any row except the trailing blank one
            # deletes the whole annotation.
            if text=='' and row<self.table.rowCount()-1:
                self.table.removeRow(row)
                if self.table.controller and self.table.module:
                    # Lock so the controller-triggered refresh does not
                    # rebuild the table mid-edit.
                    self.table.lockUpdate()
                    self.table.controller.delete_annotation(key,
                                                            self.table.module.id)
                    self.table.unlockUpdate()
                return
            # Renaming a key to an existing key is rejected; '__desc__' is
            # reserved for the module label.
            if text!='' and text!=key:
                if (self.table.module and
                    self.table.module.has_annotation_with_key(text)):
                    if text == '__desc__':
                        QtGui.QMessageBox.information(None,
                                                      "VisTrails",
                                                      'Please use "Set Module Label..." menu option'
                                                      ' to set the label for this module.')
                    else:
                        QtGui.QMessageBox.information(None,
                                                      "VisTrails",
                                                      text + ' already exists in '
                                                      'the annotations.')
                    return
        # A value cannot be committed before its key exists.
        if col==1 and key=='':
            QtGui.QMessageBox.information(None,
                                          "VisTrails",
                                          "Must provide a key first.")
            return
        # Entering a key on an otherwise-filled blank row grows the table.
        if col==0 and key=='' and text!='' and value!='':
            self.table.addRow()
        if col==1:
            # Value edited: store (key, new value) on the module.
            if text!=value:
                if self.table.controller and self.table.module:
                    self.table.lockUpdate()
                    self.table.controller.add_annotation((key, text),
                                                         self.table.module.id)
                    self.table.unlockUpdate()
                if row == self.table.rowCount()-1:
                    self.table.addRow()
        elif text!='' and self.table.controller and self.table.module:
            # Key edited: move the old annotation's value to the new key
            # (delete old key when it changed, then add under the new one).
            moduleId = self.table.module.id
            self.table.lockUpdate()
            self.table.controller.previousModuleIds = [moduleId]
            if key!=text and key!='':
                self.table.controller.delete_annotation(key, moduleId)
            self.table.controller.add_annotation((text, value),
                                                 moduleId)
            self.table.unlockUpdate()
        model.setData(index, text)
| |
import pytest
from unittest.mock import Mock
from dataactbroker.helpers import filters_helper
from dataactcore.models.errorModels import ErrorMetadata, CertifiedErrorMetadata
from dataactcore.models.jobModels import Submission
from dataactcore.models.lookups import PERMISSION_TYPE_DICT, FILE_TYPE_DICT_LETTER_ID, RULE_SEVERITY_DICT
from dataactcore.models.userModel import UserAffiliation
from dataactcore.models.validationModels import RuleSql
from dataactcore.utils.responseException import ResponseException
from tests.unit.dataactcore.factories.domain import CGACFactory, FRECFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_agency_filter(database):
    """agency_filter limits submissions by CGAC/FREC codes and raises a
    ResponseException (status 400) for malformed or unknown codes.
    """
    sess = database.session
    db_objects = []
    # Setup agencies: cgac1 stands alone; frec1/frec2 hang off cgac2/cgac3
    cgac1 = CGACFactory(cgac_code='089', agency_name='CGAC')
    cgac2 = CGACFactory(cgac_code='011', agency_name='CGAC Associated with FREC')
    cgac3 = CGACFactory(cgac_code='091', agency_name='Other CGAC Associated with FREC')
    frec1 = FRECFactory(cgac=cgac2, frec_code='1125', agency_name='FREC 1')
    frec2 = FRECFactory(cgac=cgac3, frec_code='0923', agency_name='FREC 2')
    db_objects.extend([cgac1, cgac2, cgac3, frec1, frec2])
    # Setup submissions, one per agency combination
    sub1 = SubmissionFactory(cgac_code=cgac1.cgac_code, frec_code=None)
    sub2 = SubmissionFactory(cgac_code=cgac2.cgac_code, frec_code=frec1.frec_code)
    sub3 = SubmissionFactory(cgac_code=cgac3.cgac_code, frec_code=None)
    sub4 = SubmissionFactory(cgac_code=cgac3.cgac_code, frec_code=frec2.frec_code)
    db_objects.extend([sub1, sub2, sub3, sub4])
    sess.add_all(db_objects)
    sess.commit()
    base_query = sess.query(Submission)
    # no agency list, no filtering
    agency_list = []
    query = filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                         agency_list=agency_list)
    expected_results = [sub1, sub2, sub3, sub4]
    results = query.all()
    assert set(results) == set(expected_results)
    # filter by a CGAC code (3 digits)
    agency_list = ['011']
    query = filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                         agency_list=agency_list)
    expected_results = [sub2]
    results = list(query.all())
    assert results == expected_results
    # filter by a FREC code (4 digits)
    agency_list = ['0923']
    query = filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                         agency_list=agency_list)
    expected_results = [sub4]
    results = list(query.all())
    assert results == expected_results
    # filter by a mix of CGAC and FREC codes
    agency_list = ['011', '0923']
    query = filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                         agency_list=agency_list)
    expected_results = [sub2, sub4]
    results = set(query.all())
    assert results == set(expected_results)
    # throw in one that doesn't fit the agency format ('3' is neither
    # a 3-digit CGAC nor a 4-digit FREC)
    agency_list = ['089', '1125', '3']
    expected_response = 'All codes in the agency_codes filter must be valid agency codes'
    with pytest.raises(ResponseException) as resp_except:
        filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                     agency_list=agency_list)
    assert resp_except.value.status == 400
    assert str(resp_except.value) == expected_response
    # throw in one that is well-formed but just doesn't exist
    agency_list = ['089', '1125', '012']
    with pytest.raises(ResponseException) as resp_except:
        filters_helper.agency_filter(sess, base_query, cgac_model=Submission, frec_model=Submission,
                                     agency_list=agency_list)
    assert resp_except.value.status == 400
    assert str(resp_except.value) == expected_response
def test_permissions_filter_admin(database, monkeypatch):
    """A website admin bypasses permissions filtering entirely: the query
    returned by permissions_filter must be the untouched base query.
    """
    sess = database.session
    # Setup admin user; previously the fixture list was built but never
    # persisted -- add/commit it for consistency with the sibling tests.
    admin_user = UserFactory(name='Administrator', website_admin=True)
    sess.add(admin_user)
    sess.commit()
    # Impersonate the admin via flask.g
    monkeypatch.setattr(filters_helper, 'g', Mock(user=admin_user))
    # admin user queries should be identical to the provided query
    base_query = sess.query(Submission)
    query = filters_helper.permissions_filter(base_query)
    assert query == base_query
@pytest.mark.usefixtures("user_constants")
def test_permissions_filter_agency_user(database, monkeypatch):
    """A non-admin user only sees submissions for agencies they are
    affiliated with (here: cgac1 as reader and cgac2/frec1 as writer).
    """
    sess = database.session
    # Setup agencies
    db_objects = []
    cgac1 = CGACFactory(cgac_code='089', agency_name='CGAC')
    cgac2 = CGACFactory(cgac_code='011', agency_name='CGAC Associated with FREC')
    cgac3 = CGACFactory(cgac_code='091', agency_name='Other CGAC Associated with FREC')
    frec1 = FRECFactory(cgac=cgac2, frec_code='1125', agency_name='FREC 1')
    frec2 = FRECFactory(cgac=cgac3, frec_code='0923', agency_name='FREC 2')
    db_objects.extend([cgac1, cgac2, cgac3, frec1, frec2])
    # Setup submissions; only sub1 (cgac1) and sub2 (frec1) are visible to
    # the user built below
    sub1 = SubmissionFactory(cgac_code=cgac1.cgac_code, frec_code=None)
    sub2 = SubmissionFactory(cgac_code=cgac2.cgac_code, frec_code=frec1.frec_code)
    sub3 = SubmissionFactory(cgac_code=cgac3.cgac_code, frec_code=None)
    sub4 = SubmissionFactory(cgac_code=cgac3.cgac_code, frec_code=frec2.frec_code)
    db_objects.extend([sub1, sub2, sub3, sub4])
    # Setup agency user with one CGAC-level and one FREC-level affiliation
    agency_user = UserFactory(name='Agency User', affiliations=[
        UserAffiliation(user_affiliation_id=1, cgac=cgac1, permission_type_id=PERMISSION_TYPE_DICT['reader']),
        UserAffiliation(user_affiliation_id=2, cgac=cgac2, frec=frec1,
                        permission_type_id=PERMISSION_TYPE_DICT['writer']),
    ])
    db_objects.append(agency_user)
    # Impersonate the user via flask.g
    monkeypatch.setattr(filters_helper, 'g', Mock(user=agency_user))
    sess.add_all(db_objects)
    sess.commit()
    base_query = sess.query(Submission)
    # submissions should be filtered based on user access
    query = filters_helper.permissions_filter(base_query)
    expected_results = [sub1, sub2]
    results = set(query.all())
    assert results == set(expected_results)
@pytest.mark.usefixtures('job_constants')
@pytest.mark.usefixtures('validation_constants')
def test_file_filter_rulesql(database):
    """file_filter on RuleSql should match single-file rules by letter and
    cross-file rules by either direction of the pair (e.g. 'cross-AB'
    matches both A->B and B->A rules).
    """
    sess = database.session
    # Setup RuleSql: three single-file rules and three cross-file rules
    rsql_a = RuleSql(rule_sql='', rule_label='A1', rule_error_message='', query_name='',
                     file_id=FILE_TYPE_DICT_LETTER_ID['A'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                     rule_cross_file_flag=False, target_file_id=None)
    rsql_b = RuleSql(rule_sql='', rule_label='B2', rule_error_message='', query_name='',
                     file_id=FILE_TYPE_DICT_LETTER_ID['B'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                     rule_cross_file_flag=False, target_file_id=None)
    rsql_c = RuleSql(rule_sql='', rule_label='C3', rule_error_message='', query_name='',
                     file_id=FILE_TYPE_DICT_LETTER_ID['C'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                     rule_cross_file_flag=False, target_file_id=None)
    rsql_cross_ab = RuleSql(rule_sql='', rule_label='A4', rule_error_message='', query_name='',
                            file_id=FILE_TYPE_DICT_LETTER_ID['A'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                            rule_cross_file_flag=True, target_file_id=FILE_TYPE_DICT_LETTER_ID['B'])
    rsql_cross_ba = RuleSql(rule_sql='', rule_label='B5', rule_error_message='', query_name='',
                            file_id=FILE_TYPE_DICT_LETTER_ID['B'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                            rule_cross_file_flag=True, target_file_id=FILE_TYPE_DICT_LETTER_ID['A'])
    rsql_cross_bc = RuleSql(rule_sql='', rule_label='B6', rule_error_message='', query_name='',
                            file_id=FILE_TYPE_DICT_LETTER_ID['B'], rule_severity_id=RULE_SEVERITY_DICT['fatal'],
                            rule_cross_file_flag=True, target_file_id=FILE_TYPE_DICT_LETTER_ID['C'])
    all_rules = [rsql_a, rsql_b, rsql_c, rsql_cross_ab, rsql_cross_ba, rsql_cross_bc]
    sess.add_all(all_rules)
    sess.commit()
    base_query = sess.query(RuleSql)
    # no file list, no filtering
    files = []
    query = filters_helper.file_filter(base_query, RuleSql, files)
    expected_results = all_rules
    results = query.all()
    assert set(results) == set(expected_results)
    # filter by specific file letters (A and C; cross-file rules excluded)
    files = ['A', 'C']
    query = filters_helper.file_filter(base_query, RuleSql, files)
    expected_results = [rsql_a, rsql_c]
    results = query.all()
    assert set(results) == set(expected_results)
    # filter by cross-file pair; both A->B and B->A match
    files = ['cross-AB']
    query = filters_helper.file_filter(base_query, RuleSql, files)
    expected_results = [rsql_cross_ab, rsql_cross_ba]
    results = query.all()
    assert set(results) == set(expected_results)
@pytest.mark.usefixtures('job_constants')
@pytest.mark.usefixtures('validation_constants')
def test_file_filter_cert_error_metadata(database):
    """file_filter on CertifiedErrorMetadata mirrors the RuleSql test:
    single-file matches by letter, cross-file by either direction.
    """
    sess = database.session
    # Setup CertifiedErrorMetadata: three single-file and three cross-file
    cem_a = CertifiedErrorMetadata(original_rule_label='A1', file_type_id=FILE_TYPE_DICT_LETTER_ID['A'],
                                   severity_id=RULE_SEVERITY_DICT['fatal'], target_file_type_id=None)
    cem_b = CertifiedErrorMetadata(original_rule_label='B2', file_type_id=FILE_TYPE_DICT_LETTER_ID['B'],
                                   severity_id=RULE_SEVERITY_DICT['fatal'], target_file_type_id=None)
    cem_c = CertifiedErrorMetadata(original_rule_label='C3', file_type_id=FILE_TYPE_DICT_LETTER_ID['C'],
                                   severity_id=RULE_SEVERITY_DICT['fatal'], target_file_type_id=None)
    cem_cross_ab = CertifiedErrorMetadata(original_rule_label='A4', file_type_id=FILE_TYPE_DICT_LETTER_ID['A'],
                                          severity_id=RULE_SEVERITY_DICT['fatal'],
                                          target_file_type_id=FILE_TYPE_DICT_LETTER_ID['B'])
    cem_cross_ba = CertifiedErrorMetadata(original_rule_label='B5', file_type_id=FILE_TYPE_DICT_LETTER_ID['B'],
                                          severity_id=RULE_SEVERITY_DICT['fatal'],
                                          target_file_type_id=FILE_TYPE_DICT_LETTER_ID['A'])
    cem_cross_bc = CertifiedErrorMetadata(original_rule_label='B6', file_type_id=FILE_TYPE_DICT_LETTER_ID['B'],
                                          severity_id=RULE_SEVERITY_DICT['fatal'],
                                          target_file_type_id=FILE_TYPE_DICT_LETTER_ID['C'])
    all_cems = [cem_a, cem_b, cem_c, cem_cross_ab, cem_cross_ba, cem_cross_bc]
    sess.add_all(all_cems)
    sess.commit()
    base_query = sess.query(CertifiedErrorMetadata)
    # no file list, no filtering
    files = []
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, files)
    expected_results = all_cems
    results = query.all()
    assert set(results) == set(expected_results)
    # filter by specific file letters (A and C; cross-file rows excluded)
    files = ['A', 'C']
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, files)
    expected_results = [cem_a, cem_c]
    results = query.all()
    assert set(results) == set(expected_results)
    # filter by cross-file pair; both A->B and B->A match
    files = ['cross-AB']
    query = filters_helper.file_filter(base_query, CertifiedErrorMetadata, files)
    expected_results = [cem_cross_ab, cem_cross_ba]
    results = query.all()
    assert set(results) == set(expected_results)
def test_file_filter_wrong_file_model(database):
    """file_filter must reject models that are not one of the supported
    file models.
    """
    sess = database.session
    base_query = sess.query(CertifiedErrorMetadata)
    # Should raise because Submission is not a valid file model.
    error_text = 'Invalid file model. Use one of the following instead: CertifiedErrorMetadata, ErrorMetadata, ' \
                 'RuleSetting, RuleSql.'
    with pytest.raises(ResponseException) as resp_except:
        filters_helper.file_filter(base_query, Submission, [])
    assert str(resp_except.value) == error_text
@pytest.mark.usefixtures('validation_constants')
def test_rule_severity_filter(database):
    """rule_severity_filter maps 'error'/'warning' to the matching severity
    and leaves 'mixed' unfiltered (both rows returned).
    """
    sess = database.session
    # Setup ErrorMetadata: one fatal ('error') and one warning row
    error = ErrorMetadata(severity_id=RULE_SEVERITY_DICT['fatal'])
    warning = ErrorMetadata(severity_id=RULE_SEVERITY_DICT['warning'])
    sess.add_all([error, warning])
    sess.commit()
    # Ensure the filter is working correctly
    base_query = sess.query(ErrorMetadata)
    err_query = filters_helper.rule_severity_filter(base_query, 'error', ErrorMetadata)
    warning_query = filters_helper.rule_severity_filter(base_query, 'warning', ErrorMetadata)
    mixed_query = filters_helper.rule_severity_filter(base_query, 'mixed', ErrorMetadata)
    assert err_query.first() == error
    assert warning_query.first() == warning
    assert mixed_query.count() == 2
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# python standard library
#
import unittest
from functools import partial
##
# test helper
#
from testutils import mock
##
# pycomber modules
#
from pycomber.strategies import MergeAbstract, MergeList, MergeListOverride, \
MergeSet, MergeSetOverride, MergeTuple, MergeTupleOverride, MergeDict, \
MergeDictOverride, MergePrimitives, MergeNone
class MergeTestMixin(object):
    """Shared tests for every merge-strategy class.

    Concrete TestCases assign ``self.merger_class`` before delegating to
    this setUp, which instantiates the strategy around a mock merge
    manager that simply echoes its first argument.
    """
    def setUp(self):
        # The mock manager returns its first argument, making strategy
        # results predictable without a real MergeManager.
        self.manager = mock.Mock(side_effect = lambda a, b=None: a)
        self.merger = self.merger_class(self.manager)
    def test_init_requires_one_argument(self):
        # No-arg construction must fail; a single (even None) argument
        # must not raise AttributeError.
        self.assertRaises(TypeError, self.merger_class)
        err = False
        try:
            self.merger_class(None)
        except AttributeError:
            err = True
        self.assertFalse(err)
    def test_instance_is_callable(self):
        self.assertTrue(hasattr(self.merger, '__call__'))
    def test_call_expects_2_arguments(self):
        # Too few / too many arguments must raise TypeError...
        self.assertRaises(TypeError, self.merger)
        self.assertRaises(TypeError, partial(self.merger, None))
        err=False
        try:
            self.merger(None, None)
        except (TypeError,) as e:
            # ...but exactly two must not trip the arity check; other
            # TypeErrors from the strategy body are tolerated here.
            err='takes exactly ' in e.args[0]
        except:
            # Non-TypeError failures are ignored on purpose: this test
            # only polices the call signature.
            pass
        self.assertFalse(err)
        self.assertRaises(TypeError, partial(self.merger, None, None, None))
class MergeAbstractTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for the abstract base strategy."""

    def setUp(self):
        # Point the shared mixin at the class under test.
        self.merger_class = MergeAbstract
        MergeTestMixin.setUp(self)

    def test_call_must_be_implemented(self):
        # Invoking the abstract strategy directly must raise RuntimeError.
        call_it = partial(self.merger, None, None)
        self.assertRaises(RuntimeError, call_it)
class MergeNoneTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeNone: prefers whichever argument is not None."""
    def setUp(self):
        self.merger_class = MergeNone
        MergeTestMixin.setUp(self)
    def test_non_None_arguments_is_returned(self):
        # First argument wins unless it is None; both None yields None.
        self.assertEqual(self.merger(None, 'b'), 'b')
        self.assertEqual(self.merger('a', None), 'a')
        self.assertEqual(self.merger('a', 'b'), 'a')
        self.assertEqual(self.merger(None, None), None)
class MergePrimitivesTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergePrimitives: the first argument always wins."""
    def setUp(self):
        self.merger_class = MergePrimitives
        MergeTestMixin.setUp(self)
    # Renamed from the misspelled ``test_merge_from_is_alwasy_returned``;
    # the method is discovered by prefix, so the rename is safe.
    def test_merge_from_is_always_returned(self):
        # merge_from (first argument) is returned unconditionally,
        # even when it is None.
        self.assertEqual(self.merger(None, 'b'), None)
        self.assertEqual(self.merger('a', None), 'a')
        self.assertEqual(self.merger('a', 'b'), 'a')
        self.assertEqual(self.merger(None, None), None)
class MergeListTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeList: union of both lists, duplicates removed."""
    def setUp(self):
        self.merger_class = MergeList
        MergeTestMixin.setUp(self)
    def test_merge_generates_union_of_two_list(self):
        self.assertEqual(list(self.merger([1], [2])), [1, 2])
    def test_merge_returns_unique_values(self):
        self.assertEqual(list(self.merger([1, 2], [2, 3])), [1, 2, 3])
        # Unhashable items (dicts) must be de-duplicated as well.
        d = {'a': 1}
        self.assertEqual(list(self.merger([1, d], [d, 2])), [1, 2, d])
        self.assertEqual(list(self.merger([1, 2, d], [d, 2])), [1, 2, d])
    def test_calls_merge_manager_for_each_value(self):
        # Four unique values (1, 2, d, 3) -> four manager invocations.
        d = {'a': 1}
        list(self.merger([1, 2, d], [2, 3]))
        self.assertEqual(self.manager.call_count, 4)
class MergeListOverrideTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeListOverride: keeps only the first list's items."""
    def setUp(self):
        self.merger_class = MergeListOverride
        MergeTestMixin.setUp(self)
    def test_merge_returns_generator(self):
        # The merge result is lazy, not a concrete list.
        self.assertFalse(isinstance(self.merger([1,2], [3]), list))
    def test_merge_overrides_second_list_with_items_from_first_one(self):
        self.assertEqual(list(self.merger([1], [2])), [1])
    def test_merge_returns_unique_values(self):
        self.assertEqual(list(self.merger([1, 2, 2], [2, 3])), [1, 2])
        # Unhashable items (dicts) must be de-duplicated as well.
        d = {'a': 1}
        self.assertEqual(list(self.merger([1, d, d], [d, 2])), [1, d])
        self.assertEqual(list(self.merger([1, 2, d, 2, d], [d, 3])), [1, 2, d])
    def test_calls_merge_manager_for_each_value(self):
        # Only the first list's three unique items pass through the manager.
        d = {'a': 1}
        list(self.merger([1, 2, d], [2, 3]))
        self.assertEqual(self.manager.call_count, 3)
class MergeSetTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeSet: set union of both arguments."""
    def setUp(self):
        self.merger_class = MergeSet
        MergeTestMixin.setUp(self)
    def test_merge_generates_union_of_two_sets(self):
        self.assertEqual(self.merger(set([1, 2]), set([2, 3])), set([1, 2, 3]))
        self.assertEqual(self.merger(set([1, 2]), set([3])), set([1, 2, 3]))
    def test_calls_merge_manager_for_each_key(self):
        # Three unique members -> three manager invocations.
        self.merger(set([1, 2]), set([2, 3]))
        self.assertEqual(self.manager.call_count, 3)
class MergeSetOverrideTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeSetOverride: the first set wins outright."""
    def setUp(self):
        self.merger_class = MergeSetOverride
        MergeTestMixin.setUp(self)
    def test_merge_overrides_second_set_with_first_one(self):
        self.assertEqual(self.merger(set([1, 2]), set([2, 3])), set([1, 2]))
        self.assertEqual(self.merger(set([1, 2]), set([3])), set([1, 2]))
    def test_calls_merge_manager_for_each_key(self):
        # Only the first set's two members pass through the manager.
        self.merger(set([1, 2]), set([2, 3]))
        self.assertEqual(self.manager.call_count, 2)
class MergeTupleTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeTuple: union of both tuples, duplicates removed."""
    def setUp(self):
        self.merger_class = MergeTuple
        MergeTestMixin.setUp(self)
    def test_merge_generates_union_of_two_tuples(self):
        self.assertEqual(self.merger((1, 2), (2, 3)), (1, 2, 3))
        self.assertEqual(self.merger((1, 2), (3,)), (1, 2, 3))
    def test_calls_merge_manager_for_each_key(self):
        # Three unique elements -> three manager invocations.
        self.merger((1, 2), (2, 3))
        self.assertEqual(self.manager.call_count, 3)
class MergeTupleOverrideTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeTupleOverride: the first tuple wins outright."""
    def setUp(self):
        self.merger_class = MergeTupleOverride
        MergeTestMixin.setUp(self)
    def test_merge_overrides_second_tuple_with_first_one(self):
        self.assertEqual(self.merger((1, 2), (2, 3)), (1, 2))
        self.assertEqual(self.merger((1, 2), (3,)), (1, 2))
    def test_calls_merge_manager_for_each_key(self):
        # Only the first tuple's two elements pass through the manager.
        self.merger((1, 2), (2, 3))
        self.assertEqual(self.manager.call_count, 2)
class MergeDictTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeDict: key union; value conflicts go to the manager."""
    def setUp(self):
        self.merger_class = MergeDict
        MergeTestMixin.setUp(self)
    def test_merge_generates_union_of_two_dicts(self):
        self.assertEqual(self.merger({'a': 1}, {'b': 1}), {'a': 1, 'b': 1})
        self.assertEqual(self.merger({'a': 1}, {'a': 2}), {'a': 1})
    def test_merge_of_values_depends_on_merge_manager_configuration(self):
        # With the default echo-first manager the first value wins; when
        # the manager prefers its second argument, the second value wins.
        self.assertEqual(self.merger({'a': 1}, {'a': 2}), {'a': 1})
        self.manager.side_effect = lambda a, b: b
        self.assertEqual(self.merger({'a': 1}, {'a': 2}), {'a': 2})
    def test_calls_merge_manager_for_each_key(self):
        # One shared key -> one call; two distinct keys -> two calls.
        self.merger({'a': 1}, {'a': 2})
        self.assertEqual(self.manager.call_count, 1)
        self.manager.reset_mock()
        self.merger({'a': 1}, {'b': 2})
        self.assertEqual(self.manager.call_count, 2)
class MergeDictOverrideTestCase(unittest.TestCase, MergeTestMixin):
    """Tests for MergeDictOverride: only the first dict's keys survive."""
    def setUp(self):
        self.merger_class = MergeDictOverride
        MergeTestMixin.setUp(self)
    def test_merge_overrides_values_from_second_dict_with_first_one(self):
        self.assertEqual(self.merger({'c': 3}, {'d': 4}), {'c': 3})
        self.assertEqual(self.merger({'a': 1}, {'a': 2}), {'a': 1})
    def test_calls_merge_manager_for_each_key(self):
        # Only the single key of the first dict reaches the manager,
        # whether or not it also exists in the second dict.
        self.merger({'a': 1}, {'a': 2})
        self.assertEqual(self.manager.call_count, 1)
        self.manager.reset_mock()
        self.merger({'a': 1}, {'b': 2})
        self.assertEqual(self.manager.call_count, 1)
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| |
"""Fabric file."""
from __future__ import print_function, with_statement
import errno
import os
import shutil
from contextlib import contextmanager
from fileinput import FileInput
from sys import version_info
from uuid import uuid4
from invoke import task
###############################################################################
# Constants
###############################################################################
# Project layout: package module, demo project, and derived paths.
ROOT_DIR = os.path.dirname(__file__)
MOD = "cloud_browser"
PROJ = "cloud_browser_project"
PROJ_SETTINGS = ".".join((PROJ, "settings"))
DEV_DB_DIR = os.path.join(PROJ, "db")
# Sources checked by the lint tasks and their tool configurations.
CHECK_INCLUDES = ("tasks.py", "setup.py", MOD, PROJ)
PYLINT_CFG = os.path.join("dev", "pylint.cfg")
FLAKE8_CFG = os.path.join("dev", "flake8.cfg")
ISORT_CFG = os.path.join("dev", ".isort.cfg")
# Sphinx documentation input/output and the gh-pages publishing target.
DOC_INPUT = "doc"
DOC_OUTPUT = "doc_html"
DOC_UNDERSCORE_DIRS = ("sources", "static")
DOC_BRANCH = "gh-pages"
GITHUB_USER = "ryan-roemer"
GITHUB_REPO = "django-cloud-browser"
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
# Docker image publishing; the password is taken from the environment.
DOCKER_USER = "cwolff"
DOCKER_IMAGE = "django-cloud-browser"
DOCKER_PASSWORD = os.environ.get("DOCKER_PASSWORD")
# Packaging: optional version override plus build artifacts and the
# reST docs temporarily copied to .txt for sdist.
BUILD_VERSION = os.environ.get("BUILD_VERSION")
BUILD_DIRS = ("dist", "django_cloud_browser.egg-info")
SDIST_RST_FILES = ("INSTALL.rst", "README.rst", "CHANGES.rst")
SDIST_TXT_FILES = [os.path.splitext(x)[0] + ".txt" for x in SDIST_RST_FILES]
MANAGE = os.path.join(PROJ, "manage.py")
# Dev-server address: honor HOST/PORT env vars, else a local default.
try:
    SERVER_ADDRESS = "%s:%s" % (os.environ["HOST"], os.environ["PORT"])
except KeyError:
    SERVER_ADDRESS = "127.0.0.1:8000"
###############################################################################
# Build
###############################################################################
@task
def clean(_):
    """Remove build artifacts, generated docs, and the dev database."""
    targets = list(BUILD_DIRS) + [DOC_OUTPUT, DEV_DB_DIR]
    for target in targets:
        try:
            shutil.rmtree(target)
        except OSError as err:
            # A missing directory is fine; anything else is a real error.
            if err.errno != errno.ENOENT:
                raise
@contextmanager
def _dist_wrapper():
    """Stage *.txt copies of the sdist *.rst docs, cleaning up afterwards."""
    try:
        # setup.py expects *.txt names; mirror each *.rst under that name.
        for source_rst, staged_txt in zip(SDIST_RST_FILES, SDIST_TXT_FILES):
            shutil.copy(source_rst, staged_txt)
        yield
    finally:
        # Remove the temporary *.txt copies whether or not the body failed.
        for staged_txt in SDIST_TXT_FILES:
            os.remove(staged_txt)
@contextmanager
def _update_version(context, version):
    """Temporarily pin the package ``VERSION`` tuple for a build.

    When *version* (``"major.minor.patch"``) is falsy this is a no-op.
    Otherwise the ``VERSION = (...)`` line in ``cloud_browser/__init__.py``
    is rewritten in place for the duration of the ``with`` block and
    restored with ``git checkout`` afterwards -- now also when the wrapped
    block raises.
    """
    if not version:
        yield
        return

    major, minor, patch = version.strip().split(".")
    version_file = os.path.join(ROOT_DIR, MOD, "__init__.py")
    fobj = FileInput(version_file, inplace=True)
    try:
        for line in fobj:
            if line.startswith("VERSION = ("):
                print("VERSION = (%s, %s, %s)" % (major, minor, patch))
            else:
                # ``line`` keeps its trailing newline; suppress print's own
                # newline so untouched lines are not doubled (the previous
                # ``print(line)`` blank-padded the whole file).
                print(line, end="")
    finally:
        fobj.close()

    try:
        yield
    finally:
        # Always restore the original version file, even if the build fails.
        context.run("git checkout %s" % version_file)
@task
def sdist(context, version=BUILD_VERSION):
    """Build a source distribution, optionally pinned to *version*."""
    with _update_version(context, version):
        with _dist_wrapper():
            context.run("python setup.py sdist")
@task
def register(context):
    """Register the package and prep the user for a PyPI upload.

    .. note:: May need to tweak ~/.pypirc file per issue:
        http://stackoverflow.com/questions/1569315
    """
    with _dist_wrapper():
        context.run("python setup.py register")
@task
def publish_pypi(context):
    """Upload everything under dist/ to PyPI via twine."""
    context.run("twine upload dist/*")
###############################################################################
# Quality
###############################################################################
@task
def pylint(context, rcfile=PYLINT_CFG):
    """Run the pylint style checker over all checked sources."""
    # Have a spurious DeprecationWarning in pylint.
    targets = " ".join(CHECK_INCLUDES)
    context.run("pylint --rcfile=%s %s" % (rcfile, targets))
@task
def flake8(context, rcfile=FLAKE8_CFG):
    """Run the flake8 style checker over all checked sources."""
    targets = " ".join(CHECK_INCLUDES)
    context.run("flake8 --config=%s %s" % (rcfile, targets))
@task
def isort(context, rcfile=ISORT_CFG):
    """Run the isort import-order checker over all checked sources."""
    # use dirname until https://github.com/timothycrosley/isort/issues/710
    # is resolved
    settings_dir = os.path.dirname(rcfile)
    targets = " ".join(CHECK_INCLUDES)
    context.run(
        "isort --recursive --check-only --settings-path=%s %s"
        % (settings_dir, targets)
    )
@task
def black(context):
    """Run the black style checker (requires Python >= 3.6)."""
    if version_info < (3, 6, 0):
        # black itself only runs on 3.6+, so silently skip on older Pythons.
        return
    context.run("black --check %s" % " ".join(CHECK_INCLUDES))
@task(flake8, isort, black, pylint)
def check(_):
    """Run all checkers (executed via the task pre-requisites)."""
###############################################################################
# Documentation
###############################################################################
def _touch(file_path):
with open(file_path, "wb") as fobj:
fobj.write(b"")
return fobj.name
@task
def docs(
    context, output=DOC_OUTPUT, proj_settings=PROJ_SETTINGS, version=BUILD_VERSION
):
    """Generate API documentation (using Sphinx)."""
    # Sphinx needs the project importable and Django settings configured.
    os.environ["PYTHONPATH"] = ROOT_DIR
    os.environ["DJANGO_SETTINGS_MODULE"] = proj_settings
    with _update_version(context, version):
        build_cmd = "sphinx-build -b html %s %s" % (DOC_INPUT, output)
        context.run(build_cmd)
@task
def publish_docs(
    context,
    from_folder=DOC_OUTPUT,
    to_branch=DOC_BRANCH,
    github_token=GITHUB_TOKEN,
    github_user=GITHUB_USER,
    github_repo=GITHUB_REPO,
):
    """Push the built docs to the GitHub Pages branch.

    Uses a throwaway token-authenticated git remote so no credentials are
    stored in the repository configuration.
    """
    # GitHub Pages runs Jekyll by default; .nojekyll disables it so
    # underscore-prefixed Sphinx dirs are served as-is.
    _touch(os.path.join(DOC_OUTPUT, ".nojekyll"))
    # Unique remote name avoids clashing with an existing remote.
    temp_remote = "publish-%s" % uuid4()
    context.run(
        "git remote add %s https://%s@github.com/%s/%s"
        % (temp_remote, github_token, github_user, github_repo)
    )
    context.run(
        "gh-pages --dotfiles --dist %s --branch %s --remote %s"
        % (from_folder, to_branch, temp_remote)
    )
    # Remove the temporary remote again.
    context.run("git remote rm %s" % temp_remote)
@task
def publish_docker(
    context, user=DOCKER_USER, image=DOCKER_IMAGE, version=BUILD_VERSION
):
    """Build and push the Docker image tagged *version* and ``latest``.

    When no build version is configured only ``latest`` is pushed
    (previously a literal ``None`` tag was built and pushed).
    """
    # NOTE(review): passing the password via ``-p`` exposes it in the
    # process list; ``docker login --password-stdin`` would be safer.
    context.run("docker login -u %s -p %s" % (user, DOCKER_PASSWORD))
    tags = [version, "latest"] if version else ["latest"]
    for tag in tags:
        image_name = "%s/%s:%s" % (user, image, tag)
        context.run("docker build -t %s ." % image_name)
        context.run("docker push %s" % image_name)
###############################################################################
# Django Targets
###############################################################################
def _manage(context, target, extra=""):
    """Generic wrapper for ``manage.py``: run *target* with *extra* args."""
    # Make the project importable and keep output unbuffered for live logs.
    os.environ["PYTHONPATH"] = ROOT_DIR
    os.environ["PYTHONUNBUFFERED"] = "1"
    command = "python %s %s %s" % (MANAGE, target, extra)
    context.run(command)
@task
def syncdb(context):
    """Run syncdb, creating the dev database directory if needed."""
    try:
        os.makedirs(DEV_DB_DIR)
    except OSError as err:
        # The directory already existing is fine; re-raise anything else.
        if err.errno != errno.EEXIST:
            raise
    _manage(context, "syncdb", "--noinput")
@task
def run_server(context, addr=SERVER_ADDRESS):
    """Run the Django development server bound to *addr*."""
    _manage(context, "runserver", addr)
| |
def assign_zp(filt, pars, zps):
    """Return the fitted zero-point for *filt*, or 0 when it has no entry.

    ``zps`` maps a filter name to an index into the parameter sequence
    ``pars``; filters absent from ``zps`` get a zero-point of 0.
    """
    if filt not in zps:
        return 0
    return pars[zps[filt]]
def get_locus():
    """Load and return the pickled stellar locus from ``./newlocus``.

    The file handle is now closed deterministically; the original opened
    the file and never closed it.
    """
    import pickle
    # NOTE(review): pickle data is normally opened in binary mode; text
    # mode is preserved here because the producer's format is unknown.
    with open('newlocus', 'r') as locus_file:
        return pickle.Unpickler(locus_file).load()
def mkcolorcolor(filt,catalog,starcatalog,cluster,magtype,save_file=None):
print filt
import cutout_bpz
locus_c = cutout_bpz.locus()
#locus_c = cutout_bpz.locus()
import os
base = os.environ['sne'] + '/photoz/' + cluster + '/'
f = open(base + 'stars.html','w')
print filt
filt.sort(cutout_bpz.sort_filters)
print filt
raw_input()
''' group filters '''
groups = {}
for filter2 in filt:
num = cutout_bpz.filt_num(filter2)
if not num in groups:
groups[num] = []
groups[num].append(filter2)
print groups
print catalog
import random, pyfits
print catalog, starcatalog
p = pyfits.open(catalog)['OBJECTS'].data
s = pyfits.open(starcatalog)
indices = s['OBJECTS'].data.field('SeqNr')
dict_obj = {}
for index in indices:
p.field('CLASS_STAR')[index-1] = -999
mask = p.field('CLASS_STAR') == -999
p = p[mask]
print len(p)
#mask = p.field('FWHM_WORLD')*3600. < 1.1
#p = p[mask]
print len(p)
#while not plot:
list = []
for g in sorted(groups.keys()):
list.append(groups[g])
print list
l_new = []
l_fs = {}
locus_dict_obj = {}
print list
for filt in list:
for f2 in filt:
a_short = f2.replace('+','').replace('C','')[-1]
print filt, a_short
import string
ok = True
if string.find(f2,'MEGAPRIME') != -1:
a_short = 'MP' + a_short.upper() + 'SUBARU'
elif string.find(f2,'SUBARU') != -1:
if string.find(f2,"W-S-") != -1:
a_short = 'WS' + a_short.upper() + 'SUBARU'
else:
a_short = a_short.upper() + 'JOHN'
if string.find(f2,"-1-") == -1:
ok = False
if ok:
l_new.append([filt,a_short])
l_fs[a_short] = 'yes'
print a_short, filt
if not a_short in locus_dict_obj: locus_dict_obj[a_short] = []
locus_dict_obj[a_short].append(f2)
print locus_dict_obj
print l_fs, 'l_fs'
raw_input()
import re
good_fs = []
for k1 in locus_c.keys():
res = re.split('_',k1)
print l_fs
print res
if l_fs.has_key(res[0]) and l_fs.has_key(res[1]):
good_fs.append([res[0],res[1]])
print l_fs
print locus_dict_obj, good_fs
print good_fs
raw_input()
zps_dict_obj = {}
list = ['MPUSUBARU','VJOHN','RJOHN'] #,'RJOHN','IJOHN','WSZSUBARU','WSISUBARU']
print good_fs
#good_fs = [['BJOHN','RJOHN'],['VJOHN','RJOHN']]
complist = []
for f1A,f1B in good_fs:
if f1A != 'MPUSUBARU' and f1B != 'MPUSUBARU': # True: #filter(lambda x: x==f1A, list) and filter(lambda x: x==f1B, list):
#if True: #filter(lambda x: x==f1A, list) and filter(lambda x: x==f1B, list):
zps_dict_obj[f1A] = 0
zps_dict_obj[f1B] = 0
import random
for a in locus_dict_obj[f1A]:
for b in locus_dict_obj[f1B]:
complist.append([[a,f1A],[b,f1B]])
print complist
print good_fs
print zps_dict_obj
raw_input()
#complist = complist[0:3]
print complist, 'complist'
raw_input()
zps_list_full = zps_dict_obj.keys()
zps_list = zps_list_full[1:]
zps ={}
zps_rev ={}
for i in range(len(zps_list)):
zps[zps_list[i]] = i
zps_rev[i] = zps_list[i]
table = p
loci = len(locus_c[locus_c.keys()[0]])
print loci
stars = len(table.field('MAG_' + magtype + '-' + complist[0][0][0]))
locus_list = []
for j in range(len(locus_c[locus_c.keys()[0]])):
o = []
for c in complist:
o.append(locus_c[c[0][1] + '_' + c[1][1]][j])
locus_list.append(o)
import scipy
results = {}
for iteration in ['full']: #,'bootstrap1','bootstrap2','bootstrap3','bootstrap4']:
''' make matrix with a locus for each star '''
locus_matrix = scipy.array(stars*[locus_list])
print locus_matrix.shape
''' assemble matricies to make colors '''
A_band = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[[table.field('MAG_' + magtype + '-' + a[0][0]) for a in complist]]),0,2),1,2)
B_band = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[[table.field('MAG_' + magtype + '-' + a[1][0]) for a in complist]]),0,2),1,2)
A_err = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[[table.field('MAGERR_' + magtype + '-' + a[0][0]) for a in complist]]),0,2),1,2)
B_err = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[[table.field('MAGERR_' + magtype + '-' + a[1][0]) for a in complist]]),0,2),1,2)
print A_err.shape
A_band[A_err > 0.3] = -99
B_band[B_err > 0.3] = -99
''' make matrix specifying good values '''
good = scipy.ones(A_band.shape)
good[A_band == -99] = 0
good[B_band == -99] = 0
good = good[:,0,:]
good_test = good.sum(axis=1) # sum all of the good measurements for any given star
print sorted(good_test)
print good_test
''' figure out the cut-off '''
cut_off = sorted(good_test)[-20] -1
print cut_off
A_band = A_band[good_test>cut_off]
B_band = B_band[good_test>cut_off]
A_err = A_err[good_test>cut_off]
B_err = B_err[good_test>cut_off]
locus_matrix = locus_matrix[good_test>cut_off]
if string.find(iteration,'bootstrap') != -1:
length = len(A_band)
randvec = scipy.array([random.random() for ww in range(length)])
fraction = 0.5
mask = randvec < (fraction)
A_band = A_band[mask]
B_band = B_band[mask]
A_err = A_err[mask]
B_err = B_err[mask]
locus_matrix = locus_matrix[mask]
colors = A_band - B_band
colors_err = (A_err**2. + B_err**2.)**0.5
colors_err[A_band == -99] = 1000000.
colors_err[B_band == -99] = 1000000.
colors[A_band == -99] = 0.
colors[B_band == -99] = 0.
print colors.shape, locus_matrix.shape
from copy import copy
print good_test
#colors = colors[good_test > 1]
#colors_err = colors_err[good_test > 1]
#locus_matrix = locus_matrix[good_test > 1]
stars_good = len(locus_matrix)
good = scipy.ones(A_band.shape)
good[A_band == -99] = 0
good[B_band == -99] = 0
print good.sum(axis=2).sum(axis=1).sum(axis=0)
#raw_input()
#raw_input()
#good = good[:,0,:]
good_test = good[:,0,:].sum(axis=1)
good = good[good_test > 1]
star_mag_num = good[:,0,:].sum(axis=1)
def errfunc(pars,residuals=False):
stat_tot = 0
#for i in range(len(table.field('MAG_' + magtype + '-' + complist[0][0][0])[:100])):
#print i
#print 'MAG_' + magtype + '-' + a
#print a,b
#print table.field('MAG_ISO-' + a)
#print magtype, 'MAG_' + magtype + '-' + a
if 1:
A_zp = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[stars_good*[[assign_zp(a[0][1],pars,zps) for a in complist]]]),0,1),0,0)
B_zp = scipy.swapaxes(scipy.swapaxes(scipy.array(loci*[stars_good*[[assign_zp(a[1][1],pars,zps) for a in complist]]]),0,1),0,0)
#print A_zp.shape
#raw_input()
colors_zp = A_zp- B_zp
#print colors_zp.shape
#print locus_matrix.shape
#print colors.shape
#print colors_zp[0][:][0]
#print colors[2][0], colors.shape
#print locus_matrix[2][0], locus_matrix.shape
print colors_zp.shape, colors.shape, locus_matrix.shape
ds_prelim = ((colors - locus_matrix - colors_zp)**2.)
ds_prelim[good == 0] = 0.
#print ds_prelim[2][0], 'ds_prelim'
#raw_input()
ds = (ds_prelim.sum(axis=2))**0.5
#print ds[2][0]
#raw_input()
''' formula from High 2009 '''
dotprod = abs((colors - locus_matrix - colors_zp) * colors_err)
dotprod[good == 0] = 0. # set error to zero for poor measurements not in fit
dotprod_sum = dotprod.sum(axis=2)
sum_diff = ds**2./dotprod_sum
#sum_diff = ds / colors_err
#print sum_diff[2], 'sum_diff'
#print c_diff[2][0], 'c_diff'
#raw_input()
dist = ds.min(axis=1)
select_diff = sum_diff.min(axis=1)
#print select_diff, 'select_diff'
#raw_input()
#select_diff_norm = select_diff #/star_mag_num
#print select_diff_norm, 'select_diff_norm'
#raw_input()
stat_tot = select_diff.sum()
#print stat_tot, 'stat_tot'
#raw_input()
import pylab
#pylab.clf()
#pylab.scatter((colors - colors_zp)[:,0,0],(colors - colors_zp)[:,0,1])
#pylab.scatter(locus_matrix[0,:,0],locus_matrix[0,:,1],color='red')
#pylab.show()
print pars, stat_tot#, zps
print zps_list_full
if residuals: return select_diff, dist
else: return stat_tot
import pylab
#pylab.ion()
''' now rerun after cutting outliers '''
if True:
pinit = scipy.zeros(len(zps_list))
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,args=())
print out
import scipy
print zps_list
print 'starting'
print out
residuals,dist = errfunc(pars=[0.] + out,residuals=True)
#out = [0., -0.16945683, -0.04595967, 0.06188451, 0.03366916]
print dist
print 'finished'
print 'colors' , len(colors)
''' first filter on distance '''
colors = colors[dist < 1]
colors_err = colors_err[dist < 1]
locus_matrix = locus_matrix[dist < 1]
good = good[dist < 1]
residuals = residuals[dist < 1]
''' filter on residuals '''
colors = colors[residuals < 6]
colors_err = colors_err[residuals < 6]
locus_matrix = locus_matrix[residuals < 6]
good = good[residuals < 6]
stars_good = len(locus_matrix)
star_mag_num = good[:,0,:].sum(axis=1)
print 'colors' , len(colors)
#raw_input()
pinit = scipy.zeros(len(zps_list))
from scipy import optimize
out = scipy.optimize.fmin(errfunc,pinit,args=())
print out
results[iteration] = dict(zip(zps_list_full,([0.] + out.tolist())))
print results
errors = {}
import scipy
print 'BOOTSTRAPPING ERRORS:'
for key in zps_list_full:
l = []
for r in results.keys():
if r != 'full':
l.append(results[r][key])
print key+':', scipy.std(l), 'mag'
errors[key] = scipy.std(l)
def save_results(save_file,results,errors):
f = open(save_file,'w')
for key in results['full'].keys():
f.write(key + ' ' + str(results['full'][key]) + ' +- ' + str(errors[key]) + '\n')
f.close()
import pickle
f = open(save_file + '.pickle','w')
m = pickle.Pickler(f)
pickle.dump({'results':results,'errors':errors},m)
f.close()
if results.has_key('full') and save_results is not None: save_results(save_file,results, errors)
return results
#pylab.show()
def temp():
    # NOTE(review): dead debugging scratch code, apparently carved out of loops
    # in mkcolorcolor() above -- it references names (a, b, c, d, f1A, f1B,
    # f2A, f2B, p, base, f, magtype, locus_c) that are not defined in this
    # function and would raise NameError if ever called.  The "if 1:" ladder
    # seems to preserve the indentation of the loops it was cut from.
    if 1:
        if 1:
            if 1:
                if 1:
                    if 1:
                        if 1:
                            #a = locus_dict_obj[f1A]
                            #b = locus_dict_obj[f1B]
                            #c = locus_dict_obj[f2A]
                            #d = locus_dict_obj[f2B]
                            print a,b,c,d
                            import string
                            def fix(q):
                                # rewrite MEGAPRIME filter names to the
                                # MEGAPRIME-0-1-<band> naming convention
                                if string.find(q,'MEGA') != -1:
                                    import re
                                    res = re.split('-',q)
                                    q = 'MEGAPRIME-0-1-' + res[-1]
                                print q
                                return q
                            #print catalog , starcatalog
                            #px = pickles.field(fix(a)) - pickles.field(fix(b))
                            #py = pickles.field(fix(b)) - pickles.field(fix(c))
                            #print px,py
                            import pylab
                            pylab.clf()
                            if 0: #a==b or ==f2B:
                                print a,b,c,d
                                print f1A,f1B,f2A,f2B
                                #pylab.savefig(outbase + '/RedshiftErrors.png')
                            table = p
                            print 'MAG_' + magtype + '-' + a
                            print a,b,c
                            #print table.field('MAG_ISO-' + a)
                            print magtype, 'MAG_' + magtype + '-' + a
                            # pull the four magnitude columns, then drop any row
                            # carrying the -99 sentinel in any of the four bands
                            at = table.field('MAG_' + magtype + '-' + a)[:]
                            bt = table.field('MAG_' + magtype + '-' + b)[:]
                            ct = table.field('MAG_' + magtype + '-' + c)[:]
                            dt = table.field('MAG_' + magtype + '-' + d)[:]
                            bt = bt[at!=-99]
                            ct = ct[at!=-99]
                            dt = dt[at!=-99]
                            at = at[at!=-99]
                            at = at[bt!=-99]
                            ct = ct[bt!=-99]
                            dt = dt[bt!=-99]
                            bt = bt[bt!=-99]
                            at = at[ct!=-99]
                            bt = bt[ct!=-99]
                            dt = dt[ct!=-99]
                            ct = ct[ct!=-99]
                            at = at[dt!=-99]
                            bt = bt[dt!=-99]
                            ct = ct[dt!=-99]
                            dt = dt[dt!=-99]
                            if len(at) and len(bt) and len(ct) and len(dt) and len(locus_c[f1A + '_' + f1B])==len(locus_c[f2A + '_' + f2B]):
                                # scatter the observed colors over the model locus
                                x = at - bt
                                y = ct -dt
                                x = x[:]
                                y = y[:]
                                print x[0:100], y[0:100]
                                pylab.clf()
                                pylab.xlabel(a + ' - ' + b)
                                pylab.ylabel(c + ' - ' + d)
                                pylab.scatter(x,y,s=1)
                                pylab.scatter(locus_c[f1A + '_' + f1B],locus_c[f2A + '_' + f2B],color='red',s=1.)
                                print len(x), 'x'
                                #pylab.axis([sorted(x)[5],sorted(x)[-5],sorted(y)[5
                                file = a+ b+ c+ d + '.png'
                                print
                                pylab.savefig(base + file)
                                #pylab.show()
                                pylab.clf()
                                f.write('<img src=' + file + '>\n')
                            plot = False
                            print len(at), len(bt), len(ct), len(dt), len(locus_c[f1A + '_' + f1B])==len(locus_c[f2A + '_' + f2B]), f1A, f2A, f1B, f2B
def run(cluster):
ratio = []
import astropy.io.fits as pyfits, cutout_bpz
import os
import os, sys, bashreader, commands
from utilities import *
from config_bonn import appendix, tag, arc, filters, filter_root, appendix_root
type='all'
magtype = 'APER1'
if len(sys.argv) > 2:
for s in sys.argv:
if s == 'spec':
type = 'spec'
elif s == 'rand':
type = 'rand'
elif s == 'all':
type = 'all'
elif s == 'picks':
type = 'picks'
elif s == 'ISO':
magtype = 'ISO'
elif s == 'APER1':
magtype = 'APER1'
print cluster
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%s/' % cluster
filecommand = open('record.analysis','w')
BASE="coadd"
image = BASE + '.fits'
from glob import glob
images = []
filters.reverse()
print filters
ims = {}
ims_seg = {}
params = {'path':path,
'filter_root': filter_root,
'cluster':cluster,
'appendix':appendix, }
catalog = '%(path)s/PHOTOMETRY/%(cluster)s.slr.cat' %params
starcatalog = '%(path)s/PHOTOMETRY/%(cluster)s.stars.calibrated.cat' %params
save_file = '%(path)s/PHOTOMETRY/pat_slr.calib' %params
import do_multiple_photoz, os
reload(do_multiple_photoz)
filterlist = do_multiple_photoz.get_filters(catalog,'OBJECTS')
print filterlist
filters = cutout_bpz.conv_filt(filterlist)
y = {}
for f in filters:
y[f] = 'yes'
filters = y.keys()
filters.sort(cutout_bpz.sort_filters)
print filters
os.system('mkdir -p ' + os.environ['sne'] + '/photoz/' + cluster)
mkcolorcolor(filterlist,catalog,starcatalog,cluster,magtype,save_file)
if __name__ == '__main__':
    # command line: <script> <cluster> [spec|rand|all|picks] [ISO|APER1]
    import sys
    cluster = sys.argv[1]  # first positional argument names the cluster
    run(cluster)
| |
"""Access Ansible Core CI remote services."""
from __future__ import absolute_import, print_function
import json
import os
import traceback
import uuid
import errno
import time
import shutil
from lib.http import (
HttpClient,
HttpResponse,
HttpError,
)
from lib.util import (
ApplicationError,
run_command,
make_dirs,
display,
is_shippable,
)
from lib.config import (
EnvironmentConfig,
)
AWS_ENDPOINTS = {
'us-east-1': 'https://14blg63h2i.execute-api.us-east-1.amazonaws.com',
'us-east-2': 'https://g5xynwbk96.execute-api.us-east-2.amazonaws.com',
}
class AnsibleCoreCI(object):
    """Client for Ansible Core CI services."""
    def __init__(self, args, platform, version, stage='prod', persist=True, load=True, name=None, provider=None):
        """
        :type args: EnvironmentConfig
        :type platform: str
        :type version: str
        :type stage: str
        :type persist: bool
        :type load: bool
        :type name: str
        :type provider: str | None
        """
        self.args = args
        self.platform = platform
        self.version = version
        self.stage = stage
        self.client = HttpClient(args)
        self.connection = None
        self.instance_id = None
        self.endpoint = None
        self.max_threshold = 1
        self.name = name if name else '%s-%s' % (self.platform, self.version)
        self.ci_key = os.path.expanduser('~/.ansible-core-ci.key')
        self.resource = 'jobs'
        # Assign each supported platform to one provider.
        # This is used to determine the provider from the platform when no provider is specified.
        providers = dict(
            aws=(
                'aws',
                'windows',
                'freebsd',
                'vyos',
                'junos',
                'ios',
                'tower',
                'rhel',
            ),
            azure=(
                'azure',
            ),
            parallels=(
                'osx',
            ),
        )
        if provider:
            # override default provider selection (not all combinations are valid)
            self.provider = provider
        else:
            # BUGFIX: default to None so an unsupported platform reaches the
            # ApplicationError raised below instead of failing with an
            # AttributeError the first time self.provider is read.
            self.provider = None
            for candidate in providers:
                if platform in providers[candidate]:
                    # assign default provider based on platform
                    self.provider = candidate
                    break
            for candidate in providers:
                if '%s/%s' % (platform, version) in providers[candidate]:
                    # assign default provider based on platform and version
                    self.provider = candidate
                    break
        self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage))
        if self.provider in ('aws', 'azure'):
            if self.provider != 'aws':
                self.resource = self.provider
            if args.remote_aws_region:
                # permit command-line override of region selection
                region = args.remote_aws_region
                # use a dedicated CI key when overriding the region selection
                self.ci_key += '.%s' % args.remote_aws_region
            elif is_shippable():
                # split Shippable jobs across multiple regions to maximize use of launch credits
                if self.platform == 'windows':
                    region = 'us-east-2'
                else:
                    region = 'us-east-1'
            else:
                # send all non-Shippable jobs to us-east-1 to reduce api key maintenance
                region = 'us-east-1'
            self.path = "%s-%s" % (self.path, region)
            self.endpoints = (AWS_ENDPOINTS[region],)
            self.ssh_key = SshKey(args)
            if self.platform == 'windows':
                self.port = 5986
            else:
                self.port = 22
        elif self.provider == 'parallels':
            self.endpoints = self._get_parallels_endpoints()
            self.max_threshold = 6
            self.ssh_key = SshKey(args)
            self.port = None
        else:
            # also reached when no provider matched the requested platform
            raise ApplicationError('Unsupported platform: %s' % platform)
        if persist and load and self._load():
            try:
                display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                             verbosity=1)
                self.connection = self.get(always_raise_on=[404])
                display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
            except HttpError as ex:
                if ex.status != 404:
                    raise
                self._clear()
                display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                             verbosity=1)
                self.instance_id = None
                self.endpoint = None
        elif not persist:
            self.instance_id = None
            self.endpoint = None
            self._clear()
        if self.instance_id:
            self.started = True
        else:
            self.started = False
            self.instance_id = str(uuid.uuid4())
            self.endpoint = None
        display.sensitive.add(self.instance_id)
    def _get_parallels_endpoints(self):
        """
        :rtype: tuple[str]
        """
        client = HttpClient(self.args, always=True)
        display.info('Getting available endpoints...', verbosity=1)
        sleep = 3
        for _ in range(1, 10):
            response = client.get('https://s3.amazonaws.com/ansible-ci-files/ansible-test/parallels-endpoints.txt')
            if response.status_code == 200:
                endpoints = tuple(response.response.splitlines())
                display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
                return endpoints
            display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
            time.sleep(sleep)
        raise ApplicationError('Unable to get available endpoints.')
    def start(self):
        """Start instance."""
        if self.started:
            display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return None
        if is_shippable():
            return self.start_shippable()
        return self.start_remote()
    def start_remote(self):
        """Start instance for remote development/testing."""
        with open(self.ci_key, 'r') as key_fd:
            auth_key = key_fd.read().strip()
        return self._start(dict(
            remote=dict(
                key=auth_key,
                nonce=None,
            ),
        ))
    def start_shippable(self):
        """Start instance on Shippable."""
        return self._start(dict(
            shippable=dict(
                run_id=os.environ['SHIPPABLE_BUILD_ID'],
                job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
            ),
        ))
    def stop(self):
        """Stop instance."""
        if not self.started:
            display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return
        response = self.client.delete(self._uri)
        if response.status_code == 404:
            self._clear()
            display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return
        if response.status_code == 200:
            self._clear()
            display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return
        raise self._create_http_error(response)
    def get(self, tries=3, sleep=15, always_raise_on=None):
        """
        Get instance connection information.
        :type tries: int
        :type sleep: int
        :type always_raise_on: list[int] | None
        :rtype: InstanceConnection
        """
        if not self.started:
            display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
                         verbosity=1)
            return None
        if not always_raise_on:
            always_raise_on = []
        if self.connection and self.connection.running:
            return self.connection
        while True:
            tries -= 1
            response = self.client.get(self._uri)
            if response.status_code == 200:
                break
            error = self._create_http_error(response)
            if not tries or response.status_code in always_raise_on:
                raise error
            display.warning('%s. Trying again after %d seconds.' % (error, sleep))
            time.sleep(sleep)
        if self.args.explain:
            self.connection = InstanceConnection(
                running=True,
                hostname='cloud.example.com',
                port=self.port or 12345,
                username='username',
                password='password' if self.platform == 'windows' else None,
            )
        else:
            response_json = response.json()
            status = response_json['status']
            con = response_json['connection']
            self.connection = InstanceConnection(
                running=status == 'running',
                hostname=con['hostname'],
                port=int(con.get('port', self.port)),
                username=con['username'],
                password=con.get('password'),
            )
        if self.connection.password:
            display.sensitive.add(self.connection.password)
        status = 'running' if self.connection.running else 'starting'
        display.info('Status update: %s/%s on instance %s is %s.' %
                     (self.platform, self.version, self.instance_id, status),
                     verbosity=1)
        return self.connection
    def wait(self):
        """Wait for the instance to become ready."""
        for _ in range(1, 90):
            if self.get().running:
                return
            time.sleep(10)
        raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
                               (self.platform, self.version, self.instance_id))
    @property
    def _uri(self):
        return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.resource, self.instance_id)
    def _start(self, auth):
        """Start instance."""
        display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
        if self.platform == 'windows':
            with open('examples/scripts/ConfigureRemotingForAnsible.ps1', 'rb') as winrm_config_fd:
                winrm_config = winrm_config_fd.read().decode('utf-8')
        else:
            winrm_config = None
        data = dict(
            config=dict(
                platform=self.platform,
                version=self.version,
                public_key=self.ssh_key.pub_contents if self.ssh_key else None,
                query=False,
                winrm_config=winrm_config,
            )
        )
        data.update(dict(auth=auth))
        headers = {
            'Content-Type': 'application/json',
        }
        response = self._start_try_endpoints(data, headers)
        self.started = True
        self._save()
        display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
        if self.args.explain:
            return {}
        return response.json()
    def _start_try_endpoints(self, data, headers):
        """
        :type data: dict[str, any]
        :type headers: dict[str, str]
        :rtype: HttpResponse
        """
        threshold = 1
        while threshold <= self.max_threshold:
            # note: iterating assigns self.endpoint, which _uri depends on
            for self.endpoint in self.endpoints:
                try:
                    return self._start_at_threshold(data, headers, threshold)
                except CoreHttpError as ex:
                    if ex.status == 503:
                        display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
                        continue
                    display.error(ex.remote_message)
                except HttpError as ex:
                    display.error(u'%s' % ex)
                time.sleep(3)
            threshold += 1
        raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
    def _start_at_threshold(self, data, headers, threshold):
        """
        :type data: dict[str, any]
        :type headers: dict[str, str]
        :type threshold: int
        :rtype: HttpResponse | None
        """
        tries = 3
        sleep = 15
        data['threshold'] = threshold
        display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)
        while True:
            tries -= 1
            response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
            if response.status_code == 200:
                return response
            error = self._create_http_error(response)
            if response.status_code == 503:
                raise error
            if not tries:
                raise error
            display.warning('%s. Trying again after %d seconds.' % (error, sleep))
            time.sleep(sleep)
    def _clear(self):
        """Clear instance information."""
        try:
            self.connection = None
            os.remove(self.path)
        except OSError as ex:
            if ex.errno != errno.ENOENT:
                raise
    def _load(self):
        """Load instance information."""
        try:
            with open(self.path, 'r') as instance_fd:
                data = instance_fd.read()
        except IOError as ex:
            if ex.errno != errno.ENOENT:
                raise
            return False
        if not data.startswith('{'):
            return False  # legacy format
        config = json.loads(data)
        return self.load(config)
    def load(self, config):
        """
        :type config: dict[str, str]
        :rtype: bool
        """
        self.instance_id = config['instance_id']
        self.endpoint = config['endpoint']
        self.started = True
        display.sensitive.add(self.instance_id)
        return True
    def _save(self):
        """Save instance information."""
        if self.args.explain:
            return
        config = self.save()
        make_dirs(os.path.dirname(self.path))
        with open(self.path, 'w') as instance_fd:
            instance_fd.write(json.dumps(config, indent=4, sort_keys=True))
    def save(self):
        """
        :rtype: dict[str, str]
        """
        return dict(
            platform_version='%s/%s' % (self.platform, self.version),
            instance_id=self.instance_id,
            endpoint=self.endpoint,
        )
    @staticmethod
    def _create_http_error(response):
        """
        :type response: HttpResponse
        :rtype: ApplicationError
        """
        response_json = response.json()
        stack_trace = ''
        if 'message' in response_json:
            message = response_json['message']
        elif 'errorMessage' in response_json:
            message = response_json['errorMessage'].strip()
            if 'stackTrace' in response_json:
                trace = '\n'.join([x.rstrip() for x in traceback.format_list(response_json['stackTrace'])])
                stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
        else:
            message = str(response_json)
        return CoreHttpError(response.status_code, message, stack_trace)
class CoreHttpError(HttpError):
    """HTTP response as an error."""
    def __init__(self, status, remote_message, remote_stack_trace):
        """
        :type status: int
        :type remote_message: str
        :type remote_stack_trace: str
        """
        # the base-class message carries both the remote message and its trace
        combined = '%s%s' % (remote_message, remote_stack_trace)
        super(CoreHttpError, self).__init__(status, combined)
        self.remote_message = remote_message
        self.remote_stack_trace = remote_stack_trace
class SshKey(object):
    """Container for SSH key used to connect to remote instances."""
    KEY_NAME = 'id_rsa'
    PUB_NAME = 'id_rsa.pub'
    def __init__(self, args):
        """
        :type args: EnvironmentConfig
        """
        cache_dir = 'test/cache'
        self.key = os.path.join(cache_dir, self.KEY_NAME)
        self.pub = os.path.join(cache_dir, self.PUB_NAME)
        # populate the cache copy from the user's key pair, generating one first if needed
        if not (os.path.isfile(self.key) and os.path.isfile(self.pub)):
            base_dir = os.path.expanduser('~/.ansible/test/')
            user_key = os.path.join(base_dir, self.KEY_NAME)
            user_pub = os.path.join(base_dir, self.PUB_NAME)
            if not args.explain:
                make_dirs(base_dir)
            if not (os.path.isfile(user_key) and os.path.isfile(user_pub)):
                run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 'rsa', '-N', '', '-f', user_key])
            if not args.explain:
                shutil.copy2(user_key, self.key)
                shutil.copy2(user_pub, self.pub)
        # in explain mode nothing is read from disk
        self.pub_contents = None
        if not args.explain:
            with open(self.pub, 'r') as pub_fd:
                self.pub_contents = pub_fd.read().strip()
class InstanceConnection(object):
    """Container for remote instance status and connection details."""
    def __init__(self, running, hostname, port, username, password):
        """
        :type running: bool
        :type hostname: str
        :type port: int
        :type username: str
        :type password: str | None
        """
        self.running = running
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
    def __str__(self):
        # password, when present, is appended to the credentials portion
        if self.password:
            credentials = '%s:%s' % (self.username, self.password)
        else:
            credentials = self.username
        return '%s:%s [%s]' % (self.hostname, self.port, credentials)
| |
"""Monte Carlo tree search algorithm for reinforcement learning."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.optimizers import Adam
from deepchem.models.tensorgraph.layers import Feature, Weights, Label, Layer
import numpy as np
import tensorflow as tf
import collections
import copy
import time
class MCTSLoss(Layer):
    """This layer computes the loss function for MCTS."""
    def __init__(self, value_weight, **kwargs):
        super(MCTSLoss, self).__init__(**kwargs)
        self.value_weight = value_weight
    def create_tensor(self, **kwargs):
        """Build the combined cross-entropy + weighted squared-error loss tensor."""
        tensors = [layer.out_tensor for layer in self.in_layers]
        pred_prob, pred_value, search_prob, search_value = tensors
        # eps keeps log() finite when a predicted probability is exactly zero
        eps = np.finfo(np.float32).eps
        prob_term = -tf.reduce_mean(search_prob * tf.log(pred_prob + eps))
        value_term = tf.reduce_mean(tf.square(pred_value - search_value))
        self.probability_loss = prob_term
        self.value_loss = value_term
        self.out_tensor = prob_term + self.value_weight * value_term
        return self.out_tensor
class MCTS(object):
"""
Implements a Monte Carlo tree search algorithm for reinforcement learning.
This is adapted from Silver et al, "Mastering the game of Go without human
knowledge" (https://www.nature.com/articles/nature24270). The methods
described in that paper rely on features of Go that are not generally true of
all reinforcement learning problems. To transform it into a more generally
useful RL algorithm, it has been necessary to change some aspects of the
method. The overall approach used in this implementation is still the same,
although some of the details differ.
This class requires the policy to output two quantities: a vector giving the
probability of taking each action, and an estimate of the value function for
the current state. At every step of simulating an episode, it performs an
expensive tree search to explore the consequences of many possible actions.
Based on that search, it computes much better estimates for the value function
of the current state and the desired action probabilities. In then tries to
optimize the policy to make its outputs match the result of the tree search.
Optimization proceeds through a series of iterations. Each iteration consists
of two stages:
1. Simulate many episodes. At every step perform a tree search to determine
targets for the probabilities and value function, and store them into a
buffer.
2. Optimize the policy using batches drawn from the buffer generated in step 1.
The tree search involves repeatedly selecting actions starting from the
current state. This is done by using deepcopy() to clone the environment. It
is essential that this produce a deterministic sequence of states: performing
an action on the cloned environment must always lead to the same state as
performing that action on the original environment. For environments whose
state transitions are deterministic, this is not a problem. For ones whose
state transitions are stochastic, it is essential that the random number
generator used to select new states be stored as part of the environment and
be properly cloned by deepcopy().
This class does not support policies that include recurrent layers.
"""
def __init__(self,
env,
policy,
max_search_depth=100,
n_search_episodes=1000,
discount_factor=0.99,
value_weight=1.0,
optimizer=Adam(),
model_dir=None):
"""Create an object for optimizing a policy.
Parameters
----------
env: Environment
the Environment to interact with
policy: Policy
the Policy to optimize. Its create_layers() method must return a dict containing the
keys 'action_prob' and 'value', corresponding to the action probabilities and value estimate
max_search_depth: int
the maximum depth of the tree search, measured in steps
n_search_episodes: int
the number of episodes to simulate (up to max_search_depth, if they do not
terminate first) for each tree search
discount_factor: float
the discount factor to use when computing rewards
value_weight: float
a scale factor for the value loss term in the loss function
optimizer: Optimizer
the optimizer to use
model_dir: str
the directory in which the model will be saved. If None, a temporary directory will be created.
"""
self._env = copy.deepcopy(env)
self._policy = policy
self.max_search_depth = max_search_depth
self.n_search_episodes = n_search_episodes
self.discount_factor = discount_factor
self.value_weight = value_weight
self._state_is_list = isinstance(env.state_shape[0], collections.Sequence)
if optimizer is None:
self._optimizer = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
else:
self._optimizer = optimizer
(self._graph, self._features, self._pred_prob, self._pred_value,
self._search_prob, self._search_value) = self._build_graph(
None, 'global', model_dir)
def _build_graph(self, tf_graph, scope, model_dir):
"""Construct a TensorGraph containing the policy and loss calculations."""
state_shape = self._env.state_shape
state_dtype = self._env.state_dtype
if not self._state_is_list:
state_shape = [state_shape]
state_dtype = [state_dtype]
features = []
for s, d in zip(state_shape, state_dtype):
features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
policy_layers = self._policy.create_layers(features)
action_prob = policy_layers['action_prob']
value = policy_layers['value']
search_prob = Label(shape=(None, self._env.n_actions))
search_value = Label(shape=(None,))
loss = MCTSLoss(
self.value_weight,
in_layers=[action_prob, value, search_prob, search_value])
graph = TensorGraph(
batch_size=self.max_search_depth,
use_queue=False,
graph=tf_graph,
model_dir=model_dir)
for f in features:
graph._add_layer(f)
graph.add_output(action_prob)
graph.add_output(value)
graph.set_loss(loss)
graph.set_optimizer(self._optimizer)
with graph._get_tf("Graph").as_default():
with tf.variable_scope(scope):
graph.build()
if len(graph.rnn_initial_states) > 0:
raise ValueError('MCTS does not support policies with recurrent layers')
return graph, features, action_prob, value, search_prob, search_value
def fit(self,
iterations,
steps_per_iteration=10000,
epochs_per_iteration=10,
temperature=0.5,
puct_scale=None,
max_checkpoints_to_keep=5,
checkpoint_interval=600,
restore=False):
"""Train the policy.
Parameters
----------
iterations: int
the total number of iterations (simulation followed by optimization) to perform
steps_per_iteration: int
the total number of steps to simulate in each iteration. Every step consists
of a tree search, followed by selecting an action based on the results of
the search.
epochs_per_iteration: int
the number of epochs of optimization to perform for each iteration. Each
epoch involves randomly ordering all the steps that were just simulated in
the current iteration, splitting them into batches, and looping over the
batches.
temperature: float
the temperature factor to use when selecting a move for each step of
simulation. Larger values produce a broader probability distribution and
hence more exploration. Smaller values produce a stronger preference for
whatever action did best in the tree search.
puct_scale: float
the scale of the PUCT term in the expression for selecting actions during
tree search. This should be roughly similar in magnitude to the rewards
given by the environment, since the PUCT term is added to the mean
discounted reward. This may be None, in which case a value is adaptively
selected that tries to match the mean absolute value of the discounted
reward.
max_checkpoints_to_keep: int
the maximum number of checkpoint files to keep. When this number is reached, older
files are deleted.
checkpoint_interval: float
the time interval at which to save checkpoints, measured in seconds
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
"""
if puct_scale is None:
self._puct_scale = 1.0
adapt_puct = True
else:
self._puct_scale = puct_scale
adapt_puct = False
with self._graph._get_tf("Graph").as_default():
self._graph.session.run(tf.global_variables_initializer())
if restore:
self.restore()
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
saver = tf.train.Saver(variables, max_to_keep=max_checkpoints_to_keep)
self._checkpoint_index = 0
self._checkpoint_time = time.time() + checkpoint_interval
# Run the algorithm.
for iteration in range(iterations):
buffer = self._run_episodes(steps_per_iteration, temperature, saver,
adapt_puct)
self._optimize_policy(buffer, epochs_per_iteration)
# Save a file checkpoint.
self._checkpoint_index += 1
saver.save(
self._graph.session,
self._graph.save_file,
global_step=self._checkpoint_index)
def predict(self, state):
    """Compute the policy's output predictions for a state.

    Parameters
    ----------
    state: array
        the state of the environment for which to generate predictions

    Returns
    -------
    the array of action probabilities, and the estimated value function
    """
    inputs = state if self._state_is_list else [state]
    with self._graph._get_tf("Graph").as_default():
        outputs = self._graph.session.run(
            [self._pred_prob, self._pred_value],
            feed_dict=self._create_feed_dict(inputs))
        return outputs[:2]
def select_action(self, state, deterministic=False):
    """Select an action to perform based on the environment's state.

    Parameters
    ----------
    state: array
        the state of the environment for which to select an action
    deterministic: bool
        if True, always return the best action (that is, the one with highest
        probability).  If False, randomly select an action based on the
        computed probabilities.

    Returns
    -------
    the index of the selected action
    """
    inputs = state if self._state_is_list else [state]
    with self._graph._get_tf("Graph").as_default():
        probs = self._graph.session.run(
            self._pred_prob, feed_dict=self._create_feed_dict(inputs))
    if deterministic:
        return probs.argmax()
    # Sample an action according to the predicted probabilities.
    return np.random.choice(np.arange(self._env.n_actions), p=probs[0])
def restore(self):
    """Reload the model parameters from the most recent checkpoint file.

    Raises
    ------
    ValueError
        if the model directory contains no checkpoint files.
    """
    checkpoint = tf.train.latest_checkpoint(self._graph.model_dir)
    if checkpoint is None:
        raise ValueError('No checkpoint found')
    with self._graph._get_tf("Graph").as_default():
        global_vars = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
        tf.train.Saver(global_vars).restore(self._graph.session, checkpoint)
def _create_feed_dict(self, state):
    """Create a feed dict for use by predict() or select_action().

    Each state component is given a leading batch dimension of size 1 and
    keyed by its feature layer's output tensor.
    """
    return {
        f.out_tensor: np.expand_dims(s, axis=0)
        for f, s in zip(self._features, state)
    }
def _run_episodes(self, steps, temperature, saver, adapt_puct):
    """Simulate the episodes for one iteration.

    Runs `steps` environment steps, each preceded by a Monte Carlo tree
    search, and records the data needed for policy optimization.

    Parameters
    ----------
    steps: int
        number of environment steps to simulate
    temperature: float
        exploration temperature used when converting visit counts into
        action probabilities
    saver: tf.train.Saver
        used to write periodic time-based checkpoints
    adapt_puct: bool
        if True, the PUCT scale is adaptively updated during tree search

    Returns
    -------
    list of (state, search probabilities, expected reward) tuples forming
    the replay buffer for this iteration
    """
    buffer = []
    self._env.reset()
    root = TreeSearchNode(0.0)
    for step in range(steps):
        prob, reward = self._do_tree_search(root, temperature, adapt_puct)
        state = self._env.state
        if not self._state_is_list:
            state = [state]
        buffer.append((state, prob, reward))
        # Sample the next action from the tree-search probabilities.
        action = np.random.choice(np.arange(self._env.n_actions), p=prob)
        self._env.step(action)
        if self._env.terminated:
            # Episode ended: restart the environment with a fresh tree.
            self._env.reset()
            root = TreeSearchNode(0.0)
        else:
            # Reuse the subtree rooted at the chosen action.
            root = root.children[action]
        if time.time() > self._checkpoint_time:
            self._checkpoint_index += 1
            saver.save(
                self._graph.session,
                self._graph.save_file,
                global_step=self._checkpoint_index)
            # NOTE(review): this resets the deadline to "now" instead of
            # now + checkpoint_interval (the interval is not stored on
            # self), so once the first interval elapses a checkpoint is
            # written after almost every step — confirm this is intended.
            self._checkpoint_time = time.time()
    return buffer
def _optimize_policy(self, buffer, epochs):
    """Optimize the policy based on the replay buffer from the current iteration.

    Parameters
    ----------
    buffer: list
        (state, search probabilities, reward) tuples produced by
        _run_episodes(); it is shuffled in place once per epoch
    epochs: int
        number of optimization passes to make over the buffer
    """
    batch_size = self._graph.batch_size
    n_batches = len(buffer) // batch_size
    for epoch in range(epochs):
        np.random.shuffle(buffer)

        def generate_batches():
            for batch in range(n_batches):
                indices = list(range(batch * batch_size, (batch + 1) * batch_size))
                feed_dict = {}
                for i, f in enumerate(self._features):
                    # np.stack() requires a sequence of arrays; passing a
                    # generator is deprecated and raises a TypeError on
                    # recent NumPy versions, so build explicit lists.
                    feed_dict[f] = np.stack([buffer[j][0][i] for j in indices])
                feed_dict[self._search_prob] = np.stack(
                    [buffer[j][1] for j in indices])
                feed_dict[self._search_value] = np.array(
                    [buffer[j][2] for j in indices])
                yield feed_dict

        self._graph.fit_generator(generate_batches(), checkpoint_interval=0)
def _do_tree_search(self, root, temperature, adapt_puct):
    """Perform the tree search for a state.

    Parameters
    ----------
    root: TreeSearchNode
        root of the (possibly partially built) search tree for the
        current state
    temperature: float
        exploration temperature; larger values flatten the visit-count
        distribution
    adapt_puct: bool
        if True, update self._puct_scale toward the mean absolute reward
        observed at the root

    Returns
    -------
    tuple of (action probabilities, expected reward) for the root state
    """
    # Build the tree by running simulated traces on copies of the env.
    for i in range(self.n_search_episodes):
        env = copy.deepcopy(self._env)
        self._create_trace(env, root, 1)
    # Convert visit counts into action probabilities.
    prob = np.array([c.count**(1.0 / temperature) for c in root.children])
    prob /= np.sum(prob)
    # Passing a generator to np.sum() is deprecated in NumPy; build an
    # explicit list so the reduction is well defined on all versions.
    reward = np.sum([p * c.mean_reward for p, c in zip(prob, root.children)])
    if adapt_puct:
        scale = np.sum(
            [p * np.abs(c.mean_reward) for p, c in zip(prob, root.children)])
        self._puct_scale = 0.99 * self._puct_scale + 0.01 * scale
    return prob, reward
def _create_trace(self, env, node, depth):
    """Create one trace as part of the tree search.

    Recursively descends the tree (advancing `env` along the way),
    expands leaf nodes with the policy network's predictions, and backs
    up the discounted reward through every node visited.

    Parameters
    ----------
    env: environment
        a copy of the environment, mutated as the trace descends
    node: TreeSearchNode
        the node currently being visited
    depth: int
        current depth of the trace, used against self.max_search_depth

    Returns
    -------
    the discounted future reward observed from this node
    """
    node.count += 1
    if env.terminated:
        # Mark this node as terminal
        node.children = None
        node.value = 0.0
        return 0.0
    if node.children is not None and len(node.children) == 0:
        # Expand this node.
        prob_pred, value = self.predict(env.state)
        node.value = float(value)
        node.children = [TreeSearchNode(p) for p in prob_pred[0]]
    if depth == self.max_search_depth:
        # Depth limit reached: substitute the value estimate for a rollout.
        reward = 0.0
        future_rewards = node.value
    else:
        # Select the next action to perform.
        total_counts = sum(c.count for c in node.children)
        if total_counts == 0:
            # No child has been visited yet: rank purely by prior probability.
            score = [c.prior_prob for c in node.children]
        else:
            # PUCT rule: exploit mean reward, explore via prior / visit count.
            scale = self._puct_scale * np.sqrt(total_counts)
            score = [
                c.mean_reward + scale * c.prior_prob / (1 + c.count)
                for c in node.children
            ]
        action = np.argmax(score)
        next_node = node.children[action]
        reward = env.step(action)
        # Recursively build the tree.
        future_rewards = self._create_trace(env, next_node, depth + 1)
    # Update statistics for this node.
    future_rewards = reward + self.discount_factor * future_rewards
    node.total_reward += future_rewards
    node.mean_reward = node.total_reward / node.count
    return future_rewards
class TreeSearchNode(object):
    """A single node in the Monte Carlo tree search.

    Tracks visit statistics for one position and links to its child
    nodes, one per possible action.  An empty children list means the
    node has not been expanded yet.
    """

    def __init__(self, prior_prob):
        # Prior probability assigned by the policy network.
        self.prior_prob = prior_prob
        # Visit statistics, updated as traces pass through this node.
        self.count = 0
        self.reward = 0.0
        self.total_reward = 0.0
        self.mean_reward = 0.0
        # Child nodes; [] until expanded.
        self.children = []
| |
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import IECore
import IECoreGL
# Initialise the IECoreGL library before any tests run (False presumably
# selects non-GLUT/headless initialisation — confirm against IECoreGL docs).
IECoreGL.init( False )
class TestShader( unittest.TestCase ) :
    """Tests for IECoreGL.Shader construction and parameter introspection.

    Note: the deprecated TestCase aliases assert_() and failUnless() (removed
    in Python 3.12) have been replaced with assertTrue(), consistent with the
    assertTrue() calls already used below.
    """

    def testConstructor( self ) :
        """Invalid source must raise; valid shaders report their source verbatim."""

        self.assertRaises( RuntimeError, IECoreGL.Shader, "i don't think i'm valid", "me neither" )

        vertexSource = """
        void main()
        {
            gl_Position = ftransform();
        }
        """

        fragmentSource = """
        void main()
        {
            gl_FragColor = vec4( 1, 0.5, 0.25, 1 );
        }
        """

        s1 = IECoreGL.Shader( vertexSource, fragmentSource )
        self.assertNotEqual( s1.program(), 0 )
        self.assertEqual( s1.vertexSource(), vertexSource )
        self.assertEqual( s1.geometrySource(), "" )
        self.assertEqual( s1.fragmentSource(), fragmentSource )

        s2 = IECoreGL.Shader( "", "", fragmentSource )
        self.assertNotEqual( s2.program(), 0 )
        self.assertNotEqual( s1.program(), s2.program() )
        self.assertEqual( s2.vertexSource(), "" )
        self.assertEqual( s2.geometrySource(), "" )
        self.assertEqual( s2.fragmentSource(), fragmentSource )

    def testUniformParameters( self ) :
        """Every declared uniform must be reported with size 1."""

        vertexSource = """
        attribute float floatAttrib;
        varying float varyingFloatParm;
        void main()
        {
            gl_Position = ftransform();
            varyingFloatParm = floatAttrib * gl_Position.x;
        }
        """

        fragmentSource = """
        uniform bool boolParm;
        uniform int intParm;
        uniform float floatParm;

        uniform bvec2 bvec2Parm;
        uniform bvec3 bvec3Parm;
        // uniform ivec4 bvec4Parm; // we have no suitable datatype for specifying this in IECore

        uniform ivec2 ivec2Parm;
        uniform ivec3 ivec3Parm;
        // uniform ivec4 ivec4Parm; // we have no suitable datatype for specifying this in IECore

        uniform vec2 vec2Parm;
        uniform vec3 vec3Parm;
        uniform vec4 vec4Parm;

        uniform sampler2D s2D;

        uniform struct {
            int i;
            float f;
        } s;

        uniform mat3 mat3Parm;
        uniform mat4 mat4Parm;

        varying float varyingFloatParm;

        void main()
        {
            float x = vec4Parm.r + vec3Parm.g + vec2Parm.y + floatParm + float( intParm ) + float( boolParm );
            float xx = float( ivec2Parm.x ) + float( ivec3Parm.y ) + float( bvec2Parm.x ) + float( bvec3Parm.y );
            float xxx = float( s.i ) + s.f + texture2D( s2D, vec2Parm ).r;
            vec4 p = mat4Parm * gl_FragCoord;
            vec3 pp = mat3Parm * gl_FragCoord.xyz;
            gl_FragColor = vec4( x + xx + xxx + p.x + pp.x, gl_Color.g, varyingFloatParm, 1 );
        }
        """

        s = IECoreGL.Shader( vertexSource, fragmentSource )
        self.assertTrue( s==s )

        expectedParameterNames = [
            "boolParm",
            "intParm",
            "floatParm",
            "bvec2Parm",
            "bvec3Parm",
            "ivec2Parm",
            "ivec3Parm",
            "vec2Parm",
            "vec3Parm",
            "vec4Parm",
            "s.f",
            "s.i",
            "s2D",
            "mat3Parm",
            "mat4Parm",
        ]

        parameterNames = s.uniformParameterNames()
        self.assertEqual( len( parameterNames ), len( expectedParameterNames ) )
        for n in expectedParameterNames :
            self.assertTrue( n in parameterNames )
            self.assertTrue( s.uniformParameter( n ) is not None )
            self.assertTrue( s.uniformParameter( n + "VeryUnlikelySuffix" ) is None )
            self.assertEqual( s.uniformParameter( n ).size, 1 )

    def testUniformArrayParameters( self ) :
        """Array uniforms must report their declared array sizes."""

        # TODO: get bool/bvec2/bvec3 array parameters working, and test them.

        vertexSource = """
        attribute float floatAttrib;
        varying float varyingFloatParm;
        void main()
        {
            gl_Position = ftransform();
            varyingFloatParm = floatAttrib * gl_Position.x;
        }
        """

        fragmentSource = """
        // uniform bool boolParm[4];
        uniform int intParm[2];
        uniform float floatParm[4];

        // uniform bvec2 bvec2Parm[4];
        // uniform bvec3 bvec3Parm[4];
        // uniform ivec4 bvec4Parm; // we have no suitable datatype for specifying this in IECore

        uniform ivec2 ivec2Parm[5];
        uniform ivec3 ivec3Parm[6];
        // uniform ivec4 ivec4Parm; // we have no suitable datatype for specifying this in IECore

        uniform vec2 vec2Parm[2];
        uniform vec3 vec3Parm[3];
        uniform vec4 vec4Parm[4];

        //uniform sampler2D s2D[4];

        uniform struct {
            int i;
            float f;
        } s;

        uniform mat3 mat3Parm[4];
        uniform mat4 mat4Parm[4];

        varying float varyingFloatParm;

        void main()
        {
            float x = vec4Parm[0].r + vec3Parm[0].g + vec2Parm[0].y + floatParm[0] + float( intParm[0] ); // + float( boolParm[0] );
            float xx = float( ivec2Parm[0].x ) + float( ivec3Parm[0].y ); // + float( bvec2Parm[0].x ) + float( bvec3Parm[0].y );
            float xxx = vec2Parm[0].x;
            vec4 p = mat4Parm[0] * gl_FragCoord;
            vec3 pp = mat3Parm[0] * gl_FragCoord.xyz;
            gl_FragColor = vec4( x + xx + xxx + p.x + pp.x, gl_Color.g, varyingFloatParm, 1 );
        }
        """

        s = IECoreGL.Shader( vertexSource, fragmentSource )
        self.assertTrue( s==s )

        expectedParameterNamesAndSizes = {
            "intParm" : 2,
            "floatParm" : 4,
            "ivec2Parm" : 5,
            "ivec3Parm" : 6,
            "vec2Parm" : 2,
            "vec3Parm" : 3,
            "vec4Parm" : 4,
            "mat3Parm" : 4,
            "mat4Parm" : 4,
        }

        parameterNames = s.uniformParameterNames()
        self.assertEqual( len( parameterNames ), len( expectedParameterNamesAndSizes ) )
        for n in expectedParameterNamesAndSizes.keys() :
            self.assertTrue( n in parameterNames )
            self.assertTrue( s.uniformParameter( n ) is not None )
            self.assertTrue( s.uniformParameter( n + "VeryUnlikelySuffix" ) is None )
            self.assertEqual( s.uniformParameter( n ).size, expectedParameterNamesAndSizes[n] )

    def testVertexParameters( self ) :
        """Every declared vertex attribute must be reported with size 1."""

        vertexSource = """
        attribute float floatParm;
        attribute vec2 vec2Parm;
        attribute vec3 vec3Parm;
        attribute vec4 vec4Parm;

        varying vec4 myColor;

        void main()
        {
            myColor = vec4( floatParm + vec2Parm.x, vec3Parm.y, vec4Parm.r, 1 );
            gl_Position = ftransform();
        }
        """

        fragmentSource = """
        varying vec4 myColor;

        void main()
        {
            gl_FragColor = myColor;
        }
        """

        s = IECoreGL.Shader( vertexSource, fragmentSource )
        self.assertTrue( s==s )

        expectedParameterNames = [
            "floatParm",
            "vec2Parm",
            "vec3Parm",
            "vec4Parm",
        ]

        parameterNames = s.vertexAttributeNames()
        self.assertEqual( len( parameterNames ), len( expectedParameterNames ) )
        for n in expectedParameterNames :
            self.assertTrue( n in parameterNames )
            self.assertTrue( s.vertexAttribute( n ) is not None )
            self.assertTrue( s.vertexAttribute( n + "VeryUnlikelySuffix" ) is None )
            self.assertEqual( s.vertexAttribute( n ).size, 1 )

    def testGeometryShader( self ) :
        """Uniforms declared in a geometry shader must be visible."""

        geometrySource = """
        #version 150

        layout( triangles ) in;
        layout( triangle_strip, max_vertices=3 ) out;

        uniform float geometryShaderParameter = 0;

        void main()
        {
            for( int i = 0; i < gl_in.length(); i++)
            {
                gl_Position = gl_in[i].gl_Position + vec4( geometryShaderParameter, 0, 0, 1 );
                EmitVertex();
            }
        }
        """

        s = IECoreGL.Shader( IECoreGL.Shader.defaultVertexSource(), geometrySource, IECoreGL.Shader.defaultFragmentSource() )
        self.assertTrue( "geometryShaderParameter" in s.uniformParameterNames() )

    def testEmptyGeometryShader( self ) :
        """Constructing with an empty geometry shader must not raise."""

        s = IECoreGL.Shader( IECoreGL.Shader.defaultVertexSource(), "", IECoreGL.Shader.defaultFragmentSource() )

    def testStandardParameters( self ) :
        """csParameter() must be an alias for the "Cs" uniform."""

        s = IECoreGL.Shader.constant()
        self.assertEqual( s.csParameter(), s.uniformParameter( "Cs" ) )
# Allow the test file to be run directly.
if __name__ == "__main__":
    unittest.main()
| |
"""
Modified and extended from https://github.com/camsaul/django-rest-params/blob/master/django_rest_params/decorators.py
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
from functools import wraps
from rest_framework.exceptions import APIException
from rest_framework.views import APIView
from six import string_types
# Accepted (case-insensitive) spellings for boolean query parameters.
TRUE_VALUES = ('1', 'true')
FALSE_VALUES = ('0', 'false')
# Constraint suffixes (e.g. "name__optional") whose values must be booleans.
BOOL_PARTS = ('deferred', 'optional', 'many')
# Constraint suffixes whose values must be numeric comparison bounds.
NUM_PARTS = ('gt', 'gte', 'lt', 'lte', 'eq')
class InvalidQueryParamsException(APIException):
    """Raised when a supplied parameter fails type or value validation (HTTP 400)."""
    status_code = 400
    default_detail = 'Params of the wrong type were passed on the request'
    default_code = 'invalid_parameters'
class MissingRequiredParamsException(APIException):
    """Raised when a required parameter is absent from the request (HTTP 412)."""
    status_code = 412
    default_detail = 'Required query parameters were missing from the request'
    default_code = 'missing_parameters'
# Types that we'll allow as 'tuple' params
TUPLE_TYPES = tuple, set, frozenset, list

# Scalar types a parameter may be declared as; 'unicode' only exists on
# Python 2, hence the version check.
if (sys.version_info > (3, 0)):
    VALID_TYPES = int, float, str, bool
else:
    VALID_TYPES = int, float, str, unicode, bool
class ParamValidator(object):
    """Validates a single request parameter against its declared type and
    value constraints, coercing it to the declared type on success."""

    # name of the param in the request, e.g. 'user_id' (even if we pass 'user' to the Fn)
    param_name = None
    # declared type: one of VALID_TYPES, a TUPLE_TYPES of allowed values, or a Django model
    param_type = None
    # method - explicitly allow a certain method. If both are false we'll use defaults
    allow_GET = False
    allow_POST = False
    # value validators (None means the constraint is not applied)
    gt = None
    gte = None
    lt = None
    lte = None
    eq = None
    # optional
    optional = False
    default = None
    # multiple vals
    many = False
    # django models only
    deferred = True
    field = 'id'

    def __init__(self, arg_name):
        self.param_name = arg_name

    def check_tuple_type(self, param):
        """Check that param is one of the explicitly enumerated options."""
        if param not in self.param_type:
            raise InvalidQueryParamsException('invalid option "%s": Must be one of: %s' % (param, self.param_type))

    def check_non_tuple_types(self, param):
        """Coerce param to the declared scalar type or look up the Django model instance."""
        if self.param_type == int:
            param = int(param)
        elif self.param_type == float:
            param = float(param)
        elif self.param_type == str:
            assert(isinstance(param, string_types))
        elif self.param_type == bool:
            param = str(param).lower()  # bool isn't case sensitive
            if param in TRUE_VALUES:
                param = True
            elif param in FALSE_VALUES:
                param = False
            else:
                # BUG FIX: the values were previously passed as extra
                # positional exception args instead of %-formatted in.
                raise InvalidQueryParamsException(
                    '%s is not a valid bool: must be one of: %s' % (param, TRUE_VALUES + FALSE_VALUES))
        elif hasattr(self.param_type, '_default_manager'):  # isinstance(django.models.Model) doesn't seem to work, but this is a good tell
            query_set = self.param_type.objects
            if self.deferred:
                query_set = query_set.only('id')
            param = query_set.get(**{self.field: param})
        else:
            # BUG FIX: was '____name__' (four underscores), which raised
            # AttributeError instead of the intended message.
            raise InvalidQueryParamsException("Invalid param type: %s" % self.param_type.__name__)
        return param

    def check_type(self, param):
        """ Check that the type of param is valid, or raise an Exception. This doesn't take self.many into account. """
        if isinstance(self.param_type, TUPLE_TYPES):
            self.check_tuple_type(param)
        else:
            param = self.check_non_tuple_types(param)
        return param

    def check_value(self, param):
        """ Check that a single value is lt/gt/etc. Doesn't take self.many into account. """
        if self.param_type == int or self.param_type == float:
            self.check_value_constraints(param)
        elif self.param_type == str:
            # String constraints apply to the string's length.
            self.check_value_constraints(len(param))
        return True

    def check_value_constraints(self, param):
        """Apply the eq/lt/lte/gt/gte constraints to a numeric value.

        Uses 'is not None' checks so that 0 is a usable bound (a plain
        truthiness test silently disabled zero-valued constraints).
        """
        try:
            if self.eq is not None and param != self.eq:
                # BUG FIX: message previously said "less than" for equality.
                raise InvalidQueryParamsException("must be equal to %s!" % self.eq)
            else:
                if self.lt is not None and param >= self.lt:
                    raise InvalidQueryParamsException("must be less than %s!" % self.lt)
                if self.lte is not None and param > self.lte:
                    raise InvalidQueryParamsException("must be less than or equal to %s!" % self.lte)
                if self.gt is not None and param <= self.gt:
                    raise InvalidQueryParamsException("must be greater than %s!" % self.gt)
                if self.gte is not None and param < self.gte:
                    raise InvalidQueryParamsException("must be greater than or equal to %s!" % self.gte)
        except InvalidQueryParamsException as e:
            # Prefix the message so string-length failures are distinguishable.
            msg = str(e)
            msg = ("Length " if self.param_type == str else 'Value ') + msg
            raise InvalidQueryParamsException(msg)

    def set_type(self, param_type):
        """Record the declared type, rejecting anything unsupported."""
        if not hasattr(param_type, '_default_manager'):  # django model
            if not isinstance(param_type, TUPLE_TYPES) and param_type not in VALID_TYPES:
                raise InvalidQueryParamsException("Invalid type for %s: %s is not a valid type" % (self.param_name, param_type))
        self.param_type = param_type

    def set_method(self, value):
        """Record which HTTP method(s) ('GET'/'POST') may supply this param."""
        methods = value if isinstance(value, TUPLE_TYPES) else (value,)
        for method in methods:
            if method == 'GET':
                self.allow_GET = True
            elif method == 'POST':
                self.allow_POST = True
            else:
                raise InvalidQueryParamsException('Invalid value for __method: "%s"' % method)

    def set_constraints(self, suffix, value):
        """Apply one '__suffix' option (method/bool flag/bound/default/field)."""
        if suffix == 'method':
            self.set_method(value)
        elif suffix in BOOL_PARTS:
            assert(isinstance(value, bool))
            setattr(self, suffix, value)
        elif suffix in NUM_PARTS:
            assert(isinstance(value, int) or isinstance(value, float))
            setattr(self, suffix, value)
        elif suffix == 'default':
            # A default implies the param is optional.
            self.optional = True
            self.default = value
        elif suffix == 'field':
            # BUG FIX: previously asserted on the suffix (always 'field')
            # instead of validating the supplied field name.
            assert(isinstance(value, string_types))
            self.field = value
        else:
            raise InvalidQueryParamsException("Invalid option: '__{suffix}' in param '{param_name}'".format(suffix=suffix, param_name=self.param_name))

    def validate(self, request):
        """Extract the param from the request, validate it, and return the
        coerced value (or the default for an absent optional param)."""
        request_method = request.META['REQUEST_METHOD']
        default_param_method = 'POST' if request_method == 'POST' or request_method == 'PUT' else 'GET'

        # what methods are allowed?
        use_default_methods = not self.allow_GET and not self.allow_POST
        allow_GET = (default_param_method == 'GET') if use_default_methods else self.allow_GET
        allow_POST = (default_param_method == 'POST') if use_default_methods else self.allow_POST

        # find the param
        param = None
        param_source = None
        if allow_POST:
            param = request.DATA.get(self.param_name, None)
            param_source = 'POST'
        # BUG FIX: 'if not param' wrongly fell through to GET when the POST
        # value was falsy (e.g. '0' or ''); only fall through when absent.
        if param is None and allow_GET:
            param = request.GET.get(self.param_name, None)
            param_source = 'GET'

        # optional/default
        if param is None:  # but not False, because that's a valid boolean param
            if not self.optional:
                raise MissingRequiredParamsException(self.param_name)
            else:
                return self.default

        # check type, value
        if self.many:
            if param_source == 'GET':
                # GET can only carry multiple values as a comma-separated string.
                params = str(param).split(',')
            else:
                params = param if isinstance(param, list) else (param,)
            return [self.check_type(p) for p in params if self.check_value(p)]
        else:
            param = self.check_type(param)
            self.check_value(param)
            return param
def query_params_required(**kwargs):
    """
    Request fn decorator that builds up a list of params and automatically returns a 400 if they are invalid.
    The validated params are passed to the wrapped function as kwargs.
    """
    # Build one ParamValidator per parameter name.  Keyword keys take the
    # form 'name' (declares the type) or 'name__option' (adds a constraint).
    validators = {}
    for key, value in kwargs.items():
        parts = key.split('__')
        param_key = parts[0]
        if param_key not in validators:
            validators[param_key] = ParamValidator(param_key)
        validator = validators[param_key]
        if (len(parts) == 1):
            # set type
            validator.set_type(value)
        else:
            # we only are interested in the last part, since the only thing that can be multipart is __length__eq (etc) and 'length' is not important
            suffix = parts[-1]
            validator.set_constraints(suffix, value)

    def _params(cls):
        # The decorator subclasses the view so validation runs in initial(),
        # before the handler method is dispatched.
        assert issubclass(cls, APIView), "query_params_required decorator can only be used on subclasses of APIView"

        class Wrapper(cls):

            def initial(self, request, *args, **kwargs):
                # Copy this from the default viewset initial behaviour, otherwise it is not set before a
                # validation exception would be raised.
                self.format_kwarg = self.get_format_suffix(**kwargs)
                neg = self.perform_content_negotiation(request)
                request.accepted_renderer, request.accepted_media_type = neg

                # Validate the params
                missing_params = []
                for arg_name, validator in validators.items():
                    try:
                        kwargs[arg_name] = validator.validate(request)
                    except MissingRequiredParamsException:
                        # Collect all missing params so they can be reported together.
                        missing_params.append(validator.param_name)
                if missing_params:
                    raise MissingRequiredParamsException('The following parameters were missing and are required: {required}'.format(
                        required=', '.join(missing_params)
                    ))

                # Update the kwargs on the view itself
                self.kwargs = kwargs
                super(Wrapper, self).initial(request, *args, **kwargs)

        return Wrapper
    return _params
def signin_redirect_exempt(view_func):
    """Mark a view function as being exempt from the signin page redirect"""
    @wraps(view_func)
    def wrapped_view(*args, **kwargs):
        return view_func(*args, **kwargs)
    # The redirect middleware checks this flag on the view callable.
    wrapped_view.signin_redirect_exempt = True
    return wrapped_view
| |
from collections import OrderedDict
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Timestamp
import pandas._testing as tm
class TestSelectDtypes:
    """Tests for DataFrame.select_dtypes with include/exclude given as
    list-likes, scalars, and mixtures, plus the documented error cases."""

    def test_select_dtypes_include_using_list_like(self):
        """include= (and optional exclude=) passed as lists of dtype specifiers."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )
        ri = df.select_dtypes(include=[np.number])
        ei = df[["b", "c", "d", "k"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
        ei = df[["b", "c", "d"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
        ei = df[["b", "c", "d", "f"]]
        tm.assert_frame_equal(ri, ei)
        # "datetime"/"datetime64" select only tz-naive columns; "datetimetz"
        # selects the tz-aware ones.
        ri = df.select_dtypes(include=["datetime"])
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include=["datetime64"])
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include=["datetimetz"])
        ei = df[["h", "i"]]
        tm.assert_frame_equal(ri, ei)
        # Period selection is not implemented by this API.
        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(include=["period"])

    def test_select_dtypes_exclude_using_list_like(self):
        """exclude= alone, passed as a list."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
            }
        )
        re = df.select_dtypes(exclude=[np.number])
        ee = df[["a", "e"]]
        tm.assert_frame_equal(re, ee)

    def test_select_dtypes_exclude_include_using_list_like(self):
        """include= and exclude= combined, both list-like."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        exclude = (np.datetime64,)
        include = np.bool_, "integer"
        r = df.select_dtypes(include=include, exclude=exclude)
        e = df[["b", "c", "e"]]
        tm.assert_frame_equal(r, e)
        exclude = ("datetime",)
        include = "bool", "int64", "int32"
        r = df.select_dtypes(include=include, exclude=exclude)
        e = df[["b", "e"]]
        tm.assert_frame_equal(r, e)

    def test_select_dtypes_include_using_scalars(self):
        """include= passed as a bare scalar rather than a list."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )
        ri = df.select_dtypes(include=np.number)
        ei = df[["b", "c", "d", "k"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include="datetime")
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include="datetime64")
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include="category")
        ei = df[["f"]]
        tm.assert_frame_equal(ri, ei)
        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(include="period")

    def test_select_dtypes_exclude_using_scalars(self):
        """exclude= passed as a bare scalar rather than a list."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )
        ri = df.select_dtypes(exclude=np.number)
        ei = df[["a", "e", "f", "g", "h", "i", "j"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(exclude="category")
        ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
        tm.assert_frame_equal(ri, ei)
        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(exclude="period")

    def test_select_dtypes_include_exclude_using_scalars(self):
        """Scalar include= combined with scalar exclude=."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )
        ri = df.select_dtypes(include=np.number, exclude="floating")
        ei = df[["b", "c", "k"]]
        tm.assert_frame_equal(ri, ei)

    def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
        """Scalar and list-like include=/exclude= may be mixed freely."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )
        ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
        ei = df[["b", "c"]]
        tm.assert_frame_equal(ri, ei)
        ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
        ei = df[["b", "c", "f", "k"]]
        tm.assert_frame_equal(ri, ei)

    def test_select_dtypes_duplicate_columns(self):
        # GH20839
        odict = OrderedDict
        df = DataFrame(
            odict(
                [
                    ("a", list("abc")),
                    ("b", list(range(1, 4))),
                    ("c", np.arange(3, 6).astype("u1")),
                    ("d", np.arange(4.0, 7.0, dtype="float64")),
                    ("e", [True, False, True]),
                    ("f", pd.date_range("now", periods=3).values),
                ]
            )
        )
        # Renaming creates duplicate column labels on purpose.
        df.columns = ["a", "a", "b", "b", "b", "c"]
        expected = DataFrame(
            {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
        )
        result = df.select_dtypes(include=[np.number], exclude=["floating"])
        tm.assert_frame_equal(result, expected)

    def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
        """Dtype strings like 'i8'/'O' work even without a numpy attribute."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        df["g"] = df.f.diff()
        assert not hasattr(np, "u8")
        r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
        e = df[["a", "b"]]
        tm.assert_frame_equal(r, e)
        r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
        e = df[["a", "b", "g"]]
        tm.assert_frame_equal(r, e)

    def test_select_dtypes_empty(self):
        """Calling with neither include nor exclude must raise ValueError."""
        df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
        msg = "at least one of include or exclude must be nonempty"
        with pytest.raises(ValueError, match=msg):
            df.select_dtypes()

    def test_select_dtypes_bad_datetime64(self):
        """Unit-qualified datetime64 dtypes are rejected as too specific."""
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        with pytest.raises(ValueError, match=".+ is too specific"):
            df.select_dtypes(include=["datetime64[D]"])
        with pytest.raises(ValueError, match=".+ is too specific"):
            df.select_dtypes(exclude=["datetime64[as]"])

    def test_select_dtypes_datetime_with_tz(self):
        """'datetime64[ns]' must not match tz-aware datetime columns."""
        df2 = DataFrame(
            dict(
                A=Timestamp("20130102", tz="US/Eastern"),
                B=Timestamp("20130603", tz="CET"),
            ),
            index=range(5),
        )
        df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
        result = df3.select_dtypes(include=["datetime64[ns]"])
        expected = df3.reindex(columns=[])
        tm.assert_frame_equal(result, expected)

    # NOTE(review): np.string_ / np.unicode_ were removed in NumPy 2.0, so
    # this parametrization requires an older NumPy — confirm pinned versions.
    @pytest.mark.parametrize(
        "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
    )
    @pytest.mark.parametrize("arg", ["include", "exclude"])
    def test_select_dtypes_str_raises(self, dtype, arg):
        """String dtypes are rejected in both include= and exclude=."""
        df = DataFrame(
            {
                "a": list("abc"),
                "g": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        msg = "string dtypes are not allowed"
        kwargs = {arg: [dtype]}
        with pytest.raises(TypeError, match=msg):
            df.select_dtypes(**kwargs)

    def test_select_dtypes_bad_arg_raises(self):
        """A nonsense dtype string must raise TypeError."""
        df = DataFrame(
            {
                "a": list("abc"),
                "g": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        msg = "data type.*not understood"
        with pytest.raises(TypeError, match=msg):
            df.select_dtypes(["blargy, blarg, blarg"])

    def test_select_dtypes_typecodes(self):
        # GH 11990
        # NOTE(review): tm.makeCustomDataframe was removed in newer pandas —
        # confirm the pinned pandas version still provides it.
        df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
        expected = df
        FLOAT_TYPES = list(np.typecodes["AllFloat"])
        tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
| |
import asyncio
import http.cookies
import io
import json
import mimetypes
import os
import sys
import traceback
import urllib.parse
import warnings
# Prefer the faster C implementation of chardet when it is installed.
try:
    import cchardet as chardet
except ImportError:
    import chardet
import aiohttp
from . import hdrs, helpers, streams
from .log import client_logger
from .streams import EOF_MARKER, FlowControlStreamReader
from .multidict import (CIMultiDictProxy, MultiDictProxy, MultiDict,
CIMultiDict)
from .multipart import MultipartWriter
from .protocol import HttpMessage
# True when running on Python 3.5+ (async/await syntax available).
PY_35 = sys.version_info >= (3, 5)

# Default ports implied by the http/https URL schemes.
HTTP_PORT = 80
HTTPS_PORT = 443
class ClientRequest:
    """Prepared outgoing HTTP client request.
    Derives host/path/headers/body state from the constructor arguments and
    streams the request over a transport via send()/write_bytes().
    """
    GET_METHODS = {hdrs.METH_GET, hdrs.METH_HEAD, hdrs.METH_OPTIONS}
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
    ALL_METHODS = GET_METHODS.union(POST_METHODS).union(
        {hdrs.METH_DELETE, hdrs.METH_TRACE})
    DEFAULT_HEADERS = {
        hdrs.ACCEPT: '*/*',
        hdrs.ACCEPT_ENCODING: 'gzip, deflate',
    }
    SERVER_SOFTWARE = HttpMessage.SERVER_SOFTWARE
    # class-level defaults, overwritten per instance by __init__/update_*
    body = b''
    auth = None
    response = None
    response_class = None
    _writer = None  # async task for streaming data
    _continue = None  # waiter future for '100 Continue' response
    # N.B.
    # Adding __del__ method with self._writer closing doesn't make sense
    # because _writer is instance method, thus it keeps a reference to self.
    # Until writer has finished finalizer will not be called.
    def __init__(self, method, url, *,
                 params=None, headers=None, skip_auto_headers=frozenset(),
                 data=None, cookies=None,
                 files=None, auth=None, encoding='utf-8',
                 version=aiohttp.HttpVersion11, compress=None,
                 chunked=None, expect100=False,
                 loop=None, response_class=None):
        """Build the request state; the update_* call order below matters
        (host/path must exist before headers, headers before body)."""
        if loop is None:
            loop = asyncio.get_event_loop()
        self.url = url
        self.method = method.upper()
        self.encoding = encoding
        self.chunked = chunked
        self.compress = compress
        self.loop = loop
        self.response_class = response_class or ClientResponse
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self.update_version(version)
        self.update_host(url)
        self.update_path(params)
        self.update_headers(headers)
        self.update_auto_headers(skip_auto_headers)
        self.update_cookies(cookies)
        self.update_content_encoding()
        self.update_auth(auth)
        if files:
            warnings.warn(
                'files parameter is deprecated. use data instead',
                DeprecationWarning)
            if data:
                raise ValueError(
                    'data and files parameters are '
                    'not supported at the same time.')
            data = files
        self.update_body_from_data(data, skip_auto_headers)
        self.update_transfer_encoding()
        self.update_expect_continue(expect100)
    def update_host(self, url):
        """Update destination host, port and connection type (ssl)."""
        url_parsed = urllib.parse.urlsplit(url)
        # check for network location part
        netloc = url_parsed.netloc
        if not netloc:
            raise ValueError('Host could not be detected.')
        # get host/port
        host = url_parsed.hostname
        if not host:
            raise ValueError('Host could not be detected.')
        try:
            port = url_parsed.port
        except ValueError:
            raise ValueError(
                'Port number could not be converted.') from None
        # check domain idna encoding
        try:
            netloc = netloc.encode('idna').decode('utf-8')
            host = host.encode('idna').decode('utf-8')
        except UnicodeError:
            raise ValueError('URL has an invalid label.')
        # basic auth info
        username, password = url_parsed.username, url_parsed.password
        if username:
            self.auth = helpers.BasicAuth(username, password or '')
            # strip credentials so they never leak into the Host header
            netloc = netloc.split('@', 1)[1]
        # Record entire netloc for usage in host header
        self.netloc = netloc
        scheme = url_parsed.scheme
        self.ssl = scheme in ('https', 'wss')
        # set port number if it isn't already set
        if not port:
            if self.ssl:
                port = HTTPS_PORT
            else:
                port = HTTP_PORT
        self.host, self.port, self.scheme = host, port, scheme
    def update_version(self, version):
        """Convert request version to two elements tuple.
        parser http version '1.1' => (1, 1)
        """
        if isinstance(version, str):
            v = [l.strip() for l in version.split('.', 1)]
            try:
                version = int(v[0]), int(v[1])
            except ValueError:
                raise ValueError(
                    'Can not parse http version number: {}'
                    .format(version)) from None
        self.version = version
    def update_path(self, params):
        """Build path."""
        # extract path
        scheme, netloc, path, query, fragment = urllib.parse.urlsplit(self.url)
        if not path:
            path = '/'
        if isinstance(params, dict):
            params = list(params.items())
        elif isinstance(params, (MultiDictProxy, MultiDict)):
            params = list(params.items())
        if params:
            # merge explicit params into any query already present in the URL
            params = urllib.parse.urlencode(params)
            if query:
                query = '%s&%s' % (query, params)
            else:
                query = params
        self.path = urllib.parse.urlunsplit(('', '', helpers.requote_uri(path),
                                             query, fragment))
        self.url = urllib.parse.urlunsplit(
            (scheme, netloc, self.path, '', ''))
    def update_headers(self, headers):
        """Update request headers."""
        self.headers = CIMultiDict()
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            elif isinstance(headers, (MultiDictProxy, MultiDict)):
                headers = headers.items()
            for key, value in headers:
                self.headers.add(key, value)
    def update_auto_headers(self, skip_auto_headers):
        """Add default headers (Accept, Host, User-Agent) unless explicitly
        present already or listed in skip_auto_headers."""
        self.skip_auto_headers = skip_auto_headers
        used_headers = set(self.headers) | skip_auto_headers
        for hdr, val in self.DEFAULT_HEADERS.items():
            if hdr not in used_headers:
                self.headers.add(hdr, val)
        # add host
        if hdrs.HOST not in used_headers:
            self.headers[hdrs.HOST] = self.netloc
        if hdrs.USER_AGENT not in used_headers:
            self.headers[hdrs.USER_AGENT] = self.SERVER_SOFTWARE
    def update_cookies(self, cookies):
        """Update request cookies header."""
        if not cookies:
            return
        c = http.cookies.SimpleCookie()
        if hdrs.COOKIE in self.headers:
            # merge with cookies already set on the headers
            c.load(self.headers.get(hdrs.COOKIE, ''))
            del self.headers[hdrs.COOKIE]
        if isinstance(cookies, dict):
            cookies = cookies.items()
        for name, value in cookies:
            if isinstance(value, http.cookies.Morsel):
                # use dict method because SimpleCookie class modifies value
                dict.__setitem__(c, name, value)
            else:
                c[name] = value
        self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
    def update_content_encoding(self):
        """Set request content encoding."""
        enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
        if enc:
            self.compress = enc
            self.chunked = True  # enable chunked, no need to deal with length
        elif self.compress:
            if not isinstance(self.compress, str):
                self.compress = 'deflate'
            self.headers[hdrs.CONTENT_ENCODING] = self.compress
            self.chunked = True  # enable chunked, no need to deal with length
    def update_auth(self, auth):
        """Set basic auth."""
        if auth is None:
            # fall back to credentials extracted from the URL in update_host
            auth = self.auth
        if auth is None:
            return
        if not isinstance(auth, helpers.BasicAuth):
            warnings.warn(
                'BasicAuth() tuple is required instead ', DeprecationWarning)
            auth = helpers.BasicAuth(*auth)
        self.headers[hdrs.AUTHORIZATION] = auth.encode()
    def update_body_from_data(self, data, skip_auto_headers):
        """Store the request body and derive Content-Type / Content-Length /
        chunking from its type: bytes/str, stream readers, coroutines,
        file-like objects, MultipartWriter or form data (dict/sequence)."""
        if not data:
            return
        if isinstance(data, str):
            data = data.encode(self.encoding)
        if isinstance(data, (bytes, bytearray)):
            self.body = data
            if (hdrs.CONTENT_TYPE not in self.headers and
                    hdrs.CONTENT_TYPE not in skip_auto_headers):
                self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
            if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
                self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
        elif isinstance(data, (asyncio.StreamReader, streams.DataQueue)):
            self.body = data
        elif asyncio.iscoroutine(data):
            self.body = data
            if (hdrs.CONTENT_LENGTH not in self.headers and
                    self.chunked is None):
                self.chunked = True
        elif isinstance(data, io.IOBase):
            assert not isinstance(data, io.StringIO), \
                'attempt to send text data instead of binary'
            self.body = data
            if not self.chunked and isinstance(data, io.BytesIO):
                # Not chunking if content-length can be determined
                size = len(data.getbuffer())
                self.headers[hdrs.CONTENT_LENGTH] = str(size)
                self.chunked = False
            elif not self.chunked and isinstance(data, io.BufferedReader):
                # Not chunking if content-length can be determined
                try:
                    size = os.fstat(data.fileno()).st_size - data.tell()
                    self.headers[hdrs.CONTENT_LENGTH] = str(size)
                    self.chunked = False
                except OSError:
                    # data.fileno() is not supported, e.g.
                    # io.BufferedReader(io.BytesIO(b'data'))
                    self.chunked = True
            else:
                self.chunked = True
            if hasattr(data, 'mode'):
                if data.mode == 'r':
                    raise ValueError('file {!r} should be open in binary mode'
                                     ''.format(data))
            if (hdrs.CONTENT_TYPE not in self.headers and
                    hdrs.CONTENT_TYPE not in skip_auto_headers and
                    hasattr(data, 'name')):
                # guess mime type from the file name
                mime = mimetypes.guess_type(data.name)[0]
                mime = 'application/octet-stream' if mime is None else mime
                self.headers[hdrs.CONTENT_TYPE] = mime
        elif isinstance(data, MultipartWriter):
            self.body = data.serialize()
            self.headers.update(data.headers)
            self.chunked = self.chunked or 8192
        else:
            if not isinstance(data, helpers.FormData):
                data = helpers.FormData(data)
            self.body = data(self.encoding)
            if (hdrs.CONTENT_TYPE not in self.headers and
                    hdrs.CONTENT_TYPE not in skip_auto_headers):
                self.headers[hdrs.CONTENT_TYPE] = data.content_type
            if data.is_multipart:
                self.chunked = self.chunked or 8192
            else:
                if (hdrs.CONTENT_LENGTH not in self.headers and
                        not self.chunked):
                    self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
    def update_transfer_encoding(self):
        """Analyze transfer-encoding header."""
        te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
        if self.chunked:
            # chunked transfer excludes Content-Length
            if hdrs.CONTENT_LENGTH in self.headers:
                del self.headers[hdrs.CONTENT_LENGTH]
            if 'chunked' not in te:
                self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
            # normalize truthy flag into a chunk size
            self.chunked = self.chunked if type(self.chunked) is int else 8192
        else:
            if 'chunked' in te:
                self.chunked = 8192
            else:
                self.chunked = None
                if hdrs.CONTENT_LENGTH not in self.headers:
                    self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
    def update_expect_continue(self, expect=False):
        """Create the waiter future for the 'Expect: 100-continue' handshake
        when requested via argument or already present in headers."""
        if expect:
            self.headers[hdrs.EXPECT] = '100-continue'
        elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
            expect = True
        if expect:
            self._continue = asyncio.Future(loop=self.loop)
    @asyncio.coroutine
    def write_bytes(self, request, reader):
        """Support coroutines that yields bytes objects."""
        # 100 response
        if self._continue is not None:
            yield from self._continue
        try:
            if asyncio.iscoroutine(self.body):
                # drive a body-producing coroutine by hand, forwarding every
                # bytes chunk it yields to the transport
                exc = None
                value = None
                stream = self.body
                while True:
                    try:
                        if exc is not None:
                            result = stream.throw(exc)
                        else:
                            result = stream.send(value)
                    except StopIteration as exc:
                        # NOTE(review): 'as exc' shadows the driver variable
                        # above, and Python 3 unbinds it after this handler;
                        # if exc.value is not bytes the loop would continue
                        # and the 'exc is not None' check would hit a
                        # NameError — confirm whether that path can occur.
                        if isinstance(exc.value, bytes):
                            yield from request.write(exc.value, drain=True)
                        break
                    except:
                        self.response.close(True)
                        raise
                    if isinstance(result, asyncio.Future):
                        exc = None
                        value = None
                        try:
                            value = yield result
                        except Exception as err:
                            # deliver the failure back into the coroutine
                            exc = err
                    elif isinstance(result, (bytes, bytearray)):
                        yield from request.write(result, drain=True)
                        value = None
                    else:
                        raise ValueError(
                            'Bytes object is expected, got: %s.' %
                            type(result))
            elif isinstance(self.body, asyncio.StreamReader):
                chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
                while chunk:
                    yield from request.write(chunk, drain=True)
                    chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
            elif isinstance(self.body, streams.DataQueue):
                while True:
                    try:
                        chunk = yield from self.body.read()
                        if chunk is EOF_MARKER:
                            break
                        yield from request.write(chunk, drain=True)
                    except streams.EofStream:
                        break
            elif isinstance(self.body, io.IOBase):
                # self.chunked is the chunk size here (see
                # update_transfer_encoding)
                chunk = self.body.read(self.chunked)
                while chunk:
                    request.write(chunk)
                    chunk = self.body.read(self.chunked)
            else:
                if isinstance(self.body, (bytes, bytearray)):
                    self.body = (self.body,)
                for chunk in self.body:
                    request.write(chunk)
        except Exception as exc:
            # surface write failures through the response reader
            new_exc = aiohttp.ClientRequestError(
                'Can not write request body for %s' % self.url)
            new_exc.__context__ = exc
            new_exc.__cause__ = exc
            reader.set_exception(new_exc)
        else:
            try:
                ret = request.write_eof()
                # NB: in asyncio 3.4.1+ StreamWriter.drain() is coroutine
                # see bug #170
                if (asyncio.iscoroutine(ret) or
                        isinstance(ret, asyncio.Future)):
                    yield from ret
            except Exception as exc:
                new_exc = aiohttp.ClientRequestError(
                    'Can not write request body for %s' % self.url)
                new_exc.__context__ = exc
                new_exc.__cause__ = exc
                reader.set_exception(new_exc)
        self._writer = None
    def send(self, writer, reader):
        """Serialize headers to the transport, start the background body
        writer task, and return the associated (not yet started)
        ClientResponse."""
        request = aiohttp.Request(writer, self.method, self.path, self.version)
        if self.compress:
            request.add_compression_filter(self.compress)
        if self.chunked is not None:
            request.enable_chunked_encoding()
            request.add_chunking_filter(self.chunked)
        # set default content-type
        if (self.method in self.POST_METHODS and
                hdrs.CONTENT_TYPE not in self.skip_auto_headers and
                hdrs.CONTENT_TYPE not in self.headers):
            self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
        request.add_headers(
            *((k, v)
              for k, v in ((k, value)
                           for k, value in self.headers.items())))
        request.send_headers()
        # asyncio.async is the pre-3.4.4 spelling of ensure_future
        self._writer = asyncio.async(
            self.write_bytes(request, reader), loop=self.loop)
        self.response = self.response_class(
            self.method, self.url, self.host,
            writer=self._writer, continue100=self._continue)
        self.response._post_init(self.loop)
        return self.response
    @asyncio.coroutine
    def close(self):
        """Wait for the body writer task to complete."""
        if self._writer is not None:
            try:
                yield from self._writer
            finally:
                self._writer = None
    def terminate(self):
        """Cancel the body writer task (only if the loop is still usable)."""
        if self._writer is not None:
            if hasattr(self.loop, 'is_closed'):
                if not self.loop.is_closed():
                    self._writer.cancel()
            else:
                self._writer.cancel()
            self._writer = None
class ClientResponse:
    """HTTP client response: parses status line, headers and payload from the
    connection set up by ClientRequest.send()."""
    # from the Status-Line of the response
    version = None  # HTTP-Version
    status = None  # Status-Code
    reason = None  # Reason-Phrase
    cookies = None  # Response cookies (Set-Cookie)
    content = None  # Payload stream
    headers = None  # Response headers, CIMultiDictProxy
    _connection = None  # current connection
    flow_control_class = FlowControlStreamReader  # reader flow control
    _reader = None  # input stream
    _response_parser = aiohttp.HttpResponseParser()
    _source_traceback = None
    # setted up by ClientRequest after ClientResponse object creation
    # post-init stage allows to not change ctor signature
    _loop = None
    _closed = True  # to allow __del__ for non-initialized properly response
    def __init__(self, method, url, host='', *, writer=None, continue100=None):
        """Remember request context; heavier setup happens in _post_init()
        and start()."""
        super().__init__()
        self.method = method
        self.url = url
        self.host = host
        self._content = None
        self._writer = writer
        self._continue = continue100
        self._closed = False
        self._should_close = True  # override by message.should_close later
    def _post_init(self, loop):
        # attach the event loop after construction (see class note above)
        self._loop = loop
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
    def __del__(self, _warnings=warnings):
        # close abandoned responses and emit a ResourceWarning, mirroring
        # asyncio's unclosed-resource diagnostics
        if self._closed:
            return
        self.close()
        _warnings.warn("Unclosed response {!r}".format(self),
                       ResourceWarning)
        context = {'client_response': self,
                   'message': 'Unclosed response'}
        if self._source_traceback:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)
    def __repr__(self):
        out = io.StringIO()
        print('<ClientResponse({}) [{} {}]>'.format(
            self.url, self.status, self.reason), file=out)
        print(self.headers, file=out)
        return out.getvalue()
    @property
    def connection(self):
        # underlying connection; None once released or closed
        return self._connection
    def waiting_for_continue(self):
        """Return True while the request still awaits a '100 Continue'."""
        return self._continue is not None
    def _setup_connection(self, connection):
        # bind reader/connection and create the flow-controlled payload stream
        self._reader = connection.reader
        self._connection = connection
        self.content = self.flow_control_class(
            connection.reader, loop=connection.loop)
    @asyncio.coroutine
    def start(self, connection, read_until_eof=False):
        """Start response processing."""
        self._setup_connection(connection)
        while True:
            httpstream = self._reader.set_parser(self._response_parser)
            # read response
            message = yield from httpstream.read()
            if message.code != 100:
                break
            # interim 100 Continue: wake up the waiting request writer
            if self._continue is not None and not self._continue.done():
                self._continue.set_result(True)
                self._continue = None
        # response status
        self.version = message.version
        self.status = message.code
        self.reason = message.reason
        self._should_close = message.should_close
        # headers
        self.headers = CIMultiDictProxy(message.headers)
        # payload
        response_with_body = self.method.lower() != 'head'
        self._reader.set_parser(
            aiohttp.HttpPayloadParser(message,
                                      readall=read_until_eof,
                                      response_with_body=response_with_body),
            self.content)
        # cookies
        self.cookies = http.cookies.SimpleCookie()
        if hdrs.SET_COOKIE in self.headers:
            for hdr in self.headers.getall(hdrs.SET_COOKIE):
                try:
                    self.cookies.load(hdr)
                except http.cookies.CookieError as exc:
                    client_logger.warning(
                        'Can not load response cookies: %s', exc)
        return self
    def close(self, force=True):
        """Close the response and its connection (force=False is deprecated)."""
        if not force:
            warnings.warn("force parameter should be True", DeprecationWarning,
                          stacklevel=2)
        if self._closed:
            return
        self._closed = True
        # a closed loop cannot run the cleanup below
        if hasattr(self._loop, 'is_closed'):
            if self._loop.is_closed():
                return
        if self._connection is not None:
            self._connection.close()
            self._connection = None
        self._cleanup_writer()
    @asyncio.coroutine
    def release(self):
        """Drain any remaining payload and return the connection for reuse."""
        try:
            content = self.content
            if content is not None and not content.at_eof():
                chunk = yield from content.readany()
                # keep reading until an EOF_MARKER chunk that is also empty
                # arrives — presumably the parser's end-of-payload sentinel
                while chunk is not EOF_MARKER or chunk:
                    chunk = yield from content.readany()
        finally:
            self._closed = True
            if self._connection is not None:
                self._connection.release()
                if self._reader is not None:
                    self._reader.unset_parser()
                self._connection = None
            self._cleanup_writer()
    def _cleanup_writer(self):
        # cancel a still-running request body writer task
        if self._writer is not None and not self._writer.done():
            self._writer.cancel()
        self._writer = None
    @asyncio.coroutine
    def wait_for_close(self):
        """Wait for the request writer to finish, then release the response."""
        if self._writer is not None:
            try:
                yield from self._writer
            finally:
                self._writer = None
        yield from self.release()
    @asyncio.coroutine
    def read(self, decode=False):
        """Read response payload."""
        if self._content is None:
            try:
                self._content = yield from self.content.read()
            except:
                self.close()
                raise
            else:
                yield from self.release()
        data = self._content
        if decode:
            warnings.warn(
                '.read(True) is deprecated. use .json() instead',
                DeprecationWarning)
            return (yield from self.json())
        return data
    @asyncio.coroutine
    def read_and_close(self, decode=False):
        """Read response payload and then close response."""
        warnings.warn(
            'read_and_close is deprecated, use .read() instead',
            DeprecationWarning)
        return (yield from self.read(decode))
    def _get_encoding(self):
        # charset from Content-Type, else chardet sniffing, else utf-8
        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        mtype, stype, _, params = helpers.parse_mimetype(ctype)
        encoding = params.get('charset')
        if not encoding:
            encoding = chardet.detect(self._content)['encoding']
        if not encoding:
            encoding = 'utf-8'
        return encoding
    @asyncio.coroutine
    def text(self, encoding=None):
        """Read response payload and decode."""
        if self._content is None:
            yield from self.read()
        if encoding is None:
            encoding = self._get_encoding()
        return self._content.decode(encoding)
    @asyncio.coroutine
    def json(self, *, encoding=None, loads=json.loads):
        """Read and decodes JSON response."""
        if self._content is None:
            yield from self.read()
        ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
        if 'json' not in ctype:
            client_logger.warning(
                'Attempt to decode JSON with unexpected mimetype: %s', ctype)
        if not self._content.strip():
            return None
        if encoding is None:
            encoding = self._get_encoding()
        return loads(self._content.decode(encoding))
    if PY_35:
        # async-with support, defined only on Python 3.5+
        @asyncio.coroutine
        def __aenter__(self):
            return self
        @asyncio.coroutine
        def __aexit__(self, exc_type, exc_val, exc_tb):
            # release cleanly on success, hard-close on error
            if exc_type is None:
                yield from self.release()
            else:
                self.close()
| |
#!/usr/bin/python
import soundfile as sf
#import sounddevice as sd
import os
import os.path
import sys, getopt
import re
version="0.1"
# loose time stamp matcher: optional hours, then minutes:seconds,
# with ':' or '.' accepted as separators (e.g. "8:07", "1:23:45", "12.34")
time_pattern = r"((([0-9])?[0-9](:|\.))?([0-9])?[0-9](:|\.)?[0-9][0-9])"
# strict colon form with capture groups: (hh-group, hh, mm, ss)
time_pattern2 = r"(([0-9]?[0-9]):)?([0-9]?[0-9]):([0-9][0-9])"
# characters stripped from track names before use as file names
nonValidChars = ".;[]{}#$&/()=~+^"
def sliceAudio(iFilename, names, times, verbose_en):
#open aduio
data, fs = sf.read(iFilename)
times.append(len(data)*fs)
# calculate time laps
for i in range(len(times)-1):
startPoint = times[i]*fs
endPoint = times[i+1]*fs
# write slice audio file
sf.write(names[i]+'.wav', data[startPoint:endPoint], fs)
if verbose_en == True:
print names[i]+'.wav'
def getNamesAndTimes_old(iFile):
    """Legacy parser: extract parallel (names, times) lists from a track list.

    Lines longer than 6 chars (one-letter name plus "00:00") after cleanup
    yield one entry each; lines without a ':' get a start time of 0 seconds.
    """
    names = []
    times = []
    with open(iFile, 'r') as list_file:
        for raw in list_file:
            # normalize punctuation so the positional slicing below works
            cleaned = raw.replace('.', '-').replace(',', ' ')
            cleaned = cleaned.replace(')', '-').replace('(', '-').strip()
            # find the ':' to identify the time
            colon_at = cleaned.find(':')
            if len(cleaned) <= 6:
                continue
            if colon_at == -1:
                times.append(0)
                names.append(cleaned.strip())
            elif cleaned.count(':') == 1:
                # mm:ss -> seconds
                seconds = int(cleaned[colon_at-2:colon_at])*60 + int(cleaned[colon_at+1:colon_at+3])
                times.append(seconds)
                names.append(cleaned[0:colon_at-2].strip())
            elif cleaned.count(':') == 2:
                # hh:mm:ss -> seconds
                seconds = (int(cleaned[colon_at-2:colon_at])*3600
                           + int(cleaned[colon_at+1:colon_at+3])*60
                           + int(cleaned[colon_at+4:colon_at+7]))
                times.append(seconds)
                names.append(cleaned[0:colon_at-2].strip())
    return names, times
def getNamesAndTimes(iFile):
    """Regex-based parser: extract parallel (names, times) lists.

    Each non-blank line may contain a stamp matching time_pattern; lines
    without one get a start time of 0.  Characters in nonValidChars are
    stripped from names (Python 2 str.translate signature).
    """
    names = []
    times = []
    with open(iFile, 'r') as list_file:
        for raw in list_file:
            if len(raw.strip()) == 0:
                continue
            found = re.search(time_pattern, raw)
            if found is None:
                seconds = 0
                title = raw.translate(None, nonValidChars).strip()
            else:
                stamp = found.group()
                title = raw.replace(stamp, "").translate(None, nonValidChars).strip()
                # re-parse the stamp with the strict pattern to split fields
                parts = re.search(time_pattern2, stamp)
                _, hrs, mins, secs = parts.groups()
                hrs = 0 if hrs is None else hrs
                seconds = (int(hrs)*3600) + (int(mins)*60) + int(secs)
            times.append(seconds)
            names.append(title)
    return names, times
def convert2fmt(names, fmt, keep_wav, verbose_en):
#Convert to mp3
for name in names:
if verbose_en == True:
cmd = 'avconv -i '+'"'+name+'.wav'+'" '+'"'+name+'.'+fmt+'"'
print cmd
else:
cmd = 'avconv -i '+'"'+name+'.wav'+'" '+'"'+name+'.'+fmt+'"'+' -v quiet'
os.system(cmd)
#print cmd
if verbose_en == True:
print '******************************************************'
print name+'.'+fmt, 'Created'
print '******************************************************'
if keep_wav == False:
cmd = 'rm '+'"'+name+'.wav'+'" '
#print cmd
os.system(cmd)
if verbose_en == True:
print name+'.'+'wav', 'Removed'
print '******************************************************'
def showHelp():
print "----------------------------------------------SPLIT AUDIO-----------------------------------------------"
print "splitaudio is a python program to split audio into tracks"
print "from a list of names and times."
print " "
print "Usage:"
print " splitaudio --input <input_file.wav> --list <list_of_names.txt> ..."
print " "
print "Arguments:"
print ".......Options....... .....Arguments..... .............Description.................................."
print " Long Short"
print "--input -i <input_file.wav> : input file, should be a wav file."
print "--list -l <list_of_names> : a text file with the list of names and times."
print "--format -f <mp3 | aiff | ogg> : the output audio format. default is wav"
print "--keep-wav -k : keep the sliced .wav files if other format is configured"
print "--help -h : display help"
print "--verbose -v : print messages"
print " "
print "Format of the lines in list: "
print " <track number>. <Name> <minutes:seconds>"
print " <##>. <Name> <mm:ss>"
print " Example:"
print " 4. Hello (Buddy) 8:07"
print " "
print " "
#iFilename = 'Man From Another Time - Seasick Steve (full album)-dyb6ymIaWjc.wav'
if __name__ == "__main__":
    # Command-line entry point: parse options, read the track list, slice the
    # input wav, and optionally transcode the slices.
    # identify the arguments
    # --input -i <input_file.wav> : input file, should be a wav file.
    # --list -l <list_of_names> : a text file with the list of names and times.
    # --format -f <mp3 | aiff | ogg> : the output audio format. default is wav
    # --keep-wav -k : keep the sliced .wav files if other format is configured
    # --help -h <argument> : display help
    # --verbose -v : print messages
    argv = sys.argv[1:]
    short_opt = "i:l:f:khv"
    long_opt = ['input=', 'list=', 'format=', 'keep-wav', 'help', 'verbose']
    supported_formats = ['mp3', 'aiff', 'ogg']
    # option defaults
    iaudiofile = ''
    ilistfile = ''
    au_format = ''
    keep_wav_en= False
    help_en = False
    verbose_en = False
    #parse arguments
    try:
        opts, args = getopt.getopt(argv, short_opt, long_opt)
    except getopt.GetoptError:
        print "error in command"
        print "splitaudio --input <input_file.wav> --list <list_of_names.txt>"
        sys.exit(2)
    #get configurations form arguments
    for opt, arg in opts:
        if opt in ("-i", "--input"):
            # input wav must exist and be readable
            if os.path.isfile(arg) and os.access(arg, os.R_OK):
                iaudiofile = arg
            else:
                print "error when trying to access "+arg
                print "Either file is missing or is not readable"
                sys.exit(2)
        elif opt in ('-l', '--list'):
            # track-list file must exist and be readable
            if os.path.isfile(arg) and os.access(arg, os.R_OK):
                ilistfile = arg
            else:
                print "error when trying to access "+arg
                print "Either file is missing or is not readable"
                sys.exit(2)
        elif opt in ('-f', '--format'):
            if arg in supported_formats:
                au_format = arg
            else:
                print "format not supported"
                sys.exit(2)
        elif opt in ('-k', '--keep-wav'):
            keep_wav_en = True
        elif opt in ('-h', '--help'):
            help_en = True
        elif opt in ('-v', '--verbose'):
            verbose_en = True
    #help
    # NOTE(review): -h falls through; when the files are also missing the
    # help text prints twice before exiting — confirm intended.
    if help_en == True:
        showHelp()
    if iaudiofile=='' or ilistfile=='':
        showHelp()
        sys.exit(1)
    # files
    if verbose_en == True:
        print "Input Audio File : "+iaudiofile
        print "Output List of Names: "+ilistfile
        print "Getting the Names and times form the list..."
    names, times = getNamesAndTimes(ilistfile)
    if verbose_en == True:
        print "Time [sec] Name"
        for k in range(0, len(names)):
            print " ", str(times[k]).ljust(10), " ", names[k]
        print 'Spliting audio and creating .wav files'
    sliceAudio(iaudiofile, names, times, verbose_en)
    if au_format != '':
        print 'Converting .wav to', '.'+au_format, 'format'
        convert2fmt(names, au_format, keep_wav_en, verbose_en)
    #print 'getting the name list and times'
    #names, times = getNamesAndTimes('list.txt')
    #print 'slicing audio and crating wav files'
    #sliceAudio(iFilename, names, times)
    #print 'converting to mp3 and removing wav'
    #convert2fmt(names, 'mp3')
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._runs_operations import build_cancel_request_initial, build_get_log_sas_url_request, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
# Optional per-call hook applied to each response:
# (pipeline_response, deserialized_result, response_headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RunsOperations:
"""RunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Wire up the pipeline client, serializers and service config
        (instances are created by the service client, not directly)."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.RunListResult"]:
        """Gets all the runs for a registry.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param filter: The runs filter to apply on the operation. Arithmetic operators are not
         supported. The allowed string function is 'contains'. All logical operators except 'Not',
         'Has', 'All' are allowed.
        :type filter: str
        :param top: $top is supported for get list of runs, which limits the maximum number of runs to
         return.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RunListResult or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_06_01_preview.models.RunListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # first page uses the operation's URL template; subsequent pages
            # use the server-provided next_link
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    filter=filter,
                    top=top,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    filter=filter,
                    top=top,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # deserialize one page: returns (continuation token, items)
            deserialized = self._deserialize("RunListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # fetch one page through the pipeline, mapping HTTP errors
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        registry_name: str,
        run_id: str,
        **kwargs: Any
    ) -> "_models.Run":
        """Gets the detailed information for a given run.
        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param run_id: The run ID.
        :type run_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Run, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.Run
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            run_id=run_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # run the request through the client pipeline (stream=False buffers
        # the whole body so it can be deserialized below)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Run', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    registry_name: str,
    run_id: str,
    run_update_parameters: "_models.RunUpdateParameters",
    **kwargs: Any
) -> "_models.Run":
    """Issue the initial PATCH request of the update LRO and return the Run body."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    # Default ARM error mapping, overridable by the caller via `error_map`.
    error_map = dict({
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }, **kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    body = self._serialize.body(run_update_parameters, 'RunUpdateParameters')

    req = build_update_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        run_id=run_id,
        content_type=content_type,
        json=body,
        template_url=self._update_initial.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)

    pipeline_resp = await self._client._pipeline.run(req, stream=False, **kwargs)
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 201):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 200 and 201 both carry a Run payload, so a single deserialize suffices.
    result = self._deserialize('Run', pipeline_resp)
    if cls:
        return cls(pipeline_resp, result, {})
    return result

_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}'}  # type: ignore
@distributed_trace_async
async def begin_update(
    self,
    resource_group_name: str,
    registry_name: str,
    run_id: str,
    run_update_parameters: "_models.RunUpdateParameters",
    **kwargs: Any
) -> AsyncLROPoller["_models.Run"]:
    """Patch the run properties.

    Long-running operation: issues the initial PATCH via ``_update_initial``
    and returns a poller that tracks it to completion.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param run_id: The run ID.
    :type run_id: str
    :param run_update_parameters: The run update properties.
    :type run_update_parameters:
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.RunUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Run or the result of cls(response)
    :rtype:
     ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_06_01_preview.models.Run]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE: these kwargs must be popped BEFORE calling _update_initial so the
    # initial request does not receive polling-related keyword arguments.
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Run"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial request. `cls=lambda x,y,z: x`
        # makes _update_initial hand back the raw pipeline response, which the
        # poller needs to track the operation.
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            run_id=run_id,
            run_update_parameters=run_update_parameters,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final polled response into a Run model.
        response = pipeline_response.http_response
        deserialized = self._deserialize('Run', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Resolve the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}'}  # type: ignore
@distributed_trace_async
async def get_log_sas_url(
    self,
    resource_group_name: str,
    registry_name: str,
    run_id: str,
    **kwargs: Any
) -> "_models.RunGetLogResult":
    """Gets a link to download the run logs.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param run_id: The run ID.
    :type run_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RunGetLogResult, or the result of cls(response)
    :rtype: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.RunGetLogResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunGetLogResult"]
    # Default ARM error mapping, overridable by the caller via `error_map`.
    error_map = dict({
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }, **kwargs.pop('error_map', {}))

    req = build_get_log_sas_url_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        run_id=run_id,
        template_url=self.get_log_sas_url.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)

    pipeline_resp = await self._client._pipeline.run(req, stream=False, **kwargs)
    http_resp = pipeline_resp.http_response

    if http_resp.status_code != 200:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_resp)
        raise HttpResponseError(response=http_resp, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('RunGetLogResult', pipeline_resp)
    if cls:
        return cls(pipeline_resp, result, {})
    return result

get_log_sas_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}/listLogSasUrl'}  # type: ignore
async def _cancel_initial(
    self,
    resource_group_name: str,
    registry_name: str,
    run_id: str,
    **kwargs: Any
) -> None:
    """Issue the initial POST request of the cancel LRO (no response body)."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping, overridable by the caller via `error_map`.
    error_map = dict({
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }, **kwargs.pop('error_map', {}))

    req = build_cancel_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        run_id=run_id,
        template_url=self._cancel_initial.metadata['url'],
    )
    req = _convert_request(req)
    req.url = self._client.format_url(req.url)

    pipeline_resp = await self._client._pipeline.run(req, stream=False, **kwargs)
    http_resp = pipeline_resp.http_response

    if http_resp.status_code not in (200, 202):
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_resp, None, {})

_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}/cancel'}  # type: ignore
@distributed_trace_async
async def begin_cancel(
    self,
    resource_group_name: str,
    registry_name: str,
    run_id: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Cancel an existing run.

    Long-running operation: issues the initial cancel request via
    ``_cancel_initial`` and returns a poller that tracks it to completion.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param run_id: The run ID.
    :type run_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE: these kwargs must be popped BEFORE calling _cancel_initial so the
    # initial request does not receive polling-related keyword arguments.
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial request. `cls=lambda x,y,z: x`
        # makes _cancel_initial hand back the raw pipeline response for the poller.
        raw_result = await self._cancel_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            run_id=run_id,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Cancel has no payload; only invoke the caller's hook if given.
        if cls:
            return cls(pipeline_response, None, {})

    # Resolve the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/runs/{runId}/cancel'}  # type: ignore
| |
#!/usr/bin/python
'''
Tests for MPS and MPO
'''
from mpslib import *
def test_mps(hndim):
    '''
    Test function for constructing matrix product states:
    builds an MPS from a random dense vector, queries one site
    configuration and reconstructs the dense state.

    hndim:
        The number of states on each site.

    NOTE(review): a second `def test_mps` later in this module shadows
    this definition, so this version is unreachable by name.
    '''
    nsite=10 #number of sites
    vec=random.random(hndim**nsite) #a random state in form of 1D array.
    t0=time.time()
    # NOTE(review): l=nsite/2 is integer division only under Python 2;
    # this file uses Python 2 print statements throughout.
    mps=state2MPS(vec,sitedim=hndim,l=nsite/2,method='svd') #parse the state into a <MPS> instance.
    t1=time.time()
    print 'Get MPS: %s, Elapse -> %s'%(mps,t1-t0)
    t0=time.time()
    qu=mps.query(zeros(nsite)) #query the state of a specified site config.
    t1=time.time()
    print 'Query the first site: %s(true: %s), Elapse -> %s'%(qu,vec[0],t1-t0)
    t0=time.time()
    nstate=mps.state #recover the 1D array state representation.
    t1=time.time()
    # Reconstruction error: sum of absolute deviations from the input vector.
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    #show it graphically.
    ion()
    mps.show()
    pdb.set_trace()
def test_vmps(hndim):
    '''
    Test function for constructing a vidal-form matrix product state:
    builds a VMPS from a random dense vector, checks its canonical
    property, then converts it to canonical form and re-checks
    the reconstruction.

    hndim:
        The number of states on each site.
    '''
    nsite=10 #number of sites
    vec=random.random(hndim**nsite)/sqrt(hndim**nsite/2.) #a random state in form of 1D array.
    t0=time.time()
    vmps=state2VMPS(vec,sitedim=hndim) #parse the state into a <MPS> instance.
    # NOTE(review): t0 is reset here, so the construction time above
    # is never reported.
    t0=time.time()
    nstate=vmps.state #recover the 1D array state representation.
    t1=time.time()
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    vmps.check_canonical()
    ion()
    vmps.show()
    print '\nChanging to canonical form!'
    print '###########################'
    # Convert to a canonical MPS with the orthogonality center at site 6.
    mps=vmps.canonical(6)
    t0=time.time()
    nstate=mps.state #recover the 1D array state representation.
    t1=time.time()
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    # NOTE(review): t1 is refreshed just below, so this 'Get MPS' elapse
    # measures essentially nothing.
    t1=time.time()
    print 'Get MPS: %s, Elapse -> %s'%(mps,t1-t0)
    #mps.show()
    pdb.set_trace()
def test_mps(hndim):
'''
Test function for constructing matrix product
hndim:
The number of states on each site.
'''
nsite=10 #number of sites
l=3
vec=random.random(hndim**nsite) #a random state in form of 1D array.
vec2=random.random(hndim**nsite) #a random state in form of 1D array.
t0=time.time()
mps=state2MPS(vec,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
mps2=state2MPS(vec2,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
t1=time.time()
print 'Get MPS: %s, Elapse -> %s'%(mps,t1-t0)
t0=time.time()
qu=mps.query(zeros(nsite)) #query the state of a specified site config.
t1=time.time()
print 'Query the first site: %s(true: %s), Elapse -> %s'%(qu,vec[0],t1-t0)
t0=time.time()
nstate=mps.state #recover the 1D array state representation.
t1=time.time()
print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
print 'Checking for unitary for M-matrices.\n',mps.check_canonical()
#test for multiplication
time.time()
mpsbra=mps.hconj
overlap=mpsbra*mps
t1=time.time()
print 'Overlap %s(true %s), Elapse -> %s.'%(overlap,(vec.conj()*vec).sum(),t1-t0)
#show it graphically.
ion()
mps.show()
mpsbra.show(offset=(0,2))
pdb.set_trace()
def test_compress(hndim):
    '''
    Test compression of an MPS obtained by adding two <MPS> instances:
    checks the reconstruction error of the sum before and after
    calling `compress`.

    hndim:
        The number of states on each site.
    '''
    nsite=10 #number of sites
    l=3 #position of the orthogonality center passed to state2MPS
    vec=random.random(hndim**nsite) #a random state in form of 1D array.
    vec2=random.random(hndim**nsite) #a random state in form of 1D array.
    #vec2=vec
    vecadded=vec+vec2 #exact dense sum, used as the reference below
    mps=state2MPS(vec,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
    mps2=state2MPS(vec2,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
    mpsadded=mps_add(mps,mps2) #MPS addition doubles bond dimensions
    print mpsadded
    t0=time.time()
    nstate=mps.state #recover the 1D array state representation.
    nstateadded=mpsadded.state #recover the 1D array state representation.
    t1=time.time()
    print 'State tolerence %s Elapse -> %s'%(sum(abs(nstateadded-vecadded)),t1-t0)
    t0=time.time()
    # Compress in place; reconstruction error should stay small.
    mpsadded.compress()
    t1=time.time()
    nstateadded=mpsadded.state #recover the 1D array state representation.
    print mpsadded
    print 'State tolerence(after compress) %s Elapse -> %s'%(sum(abs(nstateadded-vecadded)),t1-t0)
    pdb.set_trace()
def test_add(hndim):
    '''
    Test for addition of two <MPS> instances: the reconstructed dense
    state of mps_add(mps, mps2) is compared against vec + vec2.

    hndim:
        The number of states on each site.
    '''
    nsite=10 #number of sites
    l=3 #position of the orthogonality center passed to state2MPS
    vec=random.random(hndim**nsite) #a random state in form of 1D array.
    vec2=random.random(hndim**nsite) #a random state in form of 1D array.
    vecadded=vec+vec2 #exact dense sum, used as the reference below
    mps=state2MPS(vec,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
    mps2=state2MPS(vec2,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
    mpsadded=mps_add(mps,mps2)
    print mps
    print mpsadded
    t0=time.time()
    nstate=mps.state #recover the 1D array state representation.
    nstate2=mps2.state #recover the 1D array state representation.
    nstateadded=mpsadded.state #recover the 1D array state representation.
    t1=time.time()
    print 'State tolerence(first,second,added) %s, %s, %s Elapse -> %s'%(sum(abs(nstate-vec)),sum(abs(nstate2-vec2)),sum(abs(nstateadded-vecadded)),t1-t0)
    pdb.set_trace()
def test_move(hndim):
    '''
    Test for canonical move of <MPS>: shifts the orthogonality center
    right (>>) and left (<<) with a truncation tolerance, checking the
    reconstruction error after each move.

    hndim:
        The number of states on each site.
    '''
    ion()
    nsite=10 #number of sites
    l=3 #initial position of the orthogonality center
    vec=random.random(hndim**nsite) #a random state in form of 1D array.
    mps=state2MPS(vec,sitedim=hndim,l=l,method='svd') #parse the state into a <MPS> instance.
    t0=time.time()
    nstate=mps.state #recover the 1D array state representation.
    t1=time.time()
    mps.show()
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    pdb.set_trace()
    t0=time.time()
    # Move the orthogonality center 1 site to the right, tolerance 1e-5.
    mps>>(1,1e-5)
    t1=time.time()
    nstate=mps.state #recover the 1D array state representation.
    cla()
    mps.show()
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    pdb.set_trace()
    t0=time.time()
    # Move the orthogonality center 4 sites to the left, tolerance 1e-6.
    mps<<(4,1e-6)
    t1=time.time()
    nstate=mps.state #recover the 1D array state representation.
    cla()
    mps.show()
    print 'State tolerence %s, Elapse -> %s'%(sum(abs(nstate-vec)),t1-t0)
    pdb.set_trace()
if __name__=='__main__':
    # Script entry point: only the compression test runs by default
    # (hndim=3, i.e. three states per site).
    test_compress(3)
| |
import functools
from operator import mul
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import max_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer import utils
from chainer.utils import conv_nd
class MaxPoolingND(pooling_nd._PoolingND):

    """Max pooling over a set of N-dimensional planes.

    .. warning::

        This feature is experimental. The interface can change in the future.

    """

    def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True):
        utils.experimental('chainer.functions.pooling.MaxPoolingND')
        super(MaxPoolingND, self).__init__(
            ndim, ksize, stride=stride, pad=pad, cover_all=cover_all)

    def forward_cpu(self, x):
        # Remember input shape/dtype for the backward pass (no retained input).
        self._in_shape = x[0].shape
        self._in_dtype = x[0].dtype

        # im2col with -inf padding so padded positions never win the max.
        col = conv_nd.im2col_nd_cpu(
            x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
            cover_all=self.cover_all)
        n, c = col.shape[:2]
        mid = (len(col.shape) - 2) // 2 + 2
        ksize = col.shape[2:mid]
        outs = col.shape[mid:]
        # Collapse the N kernel axes into one so a single max/argmax suffices:
        # (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
        col_shape = (n, c) + (functools.reduce(mul, ksize),) + outs
        col = col.reshape(col_shape)

        # We select maximum twice, since the implementation using numpy.choose
        # hits its bug when kh * kw >= 32.
        self.indexes = col.argmax(axis=2)
        y = col.max(axis=2)
        return y,

    def forward_gpu(self, x):
        if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            self.retain_inputs((0,))
            return super(MaxPoolingND, self).forward_gpu(x)

        # Fallback: custom elementwise kernel that also records argmax indexes.
        self._in_shape = x[0].shape
        self._in_dtype = x[0].dtype

        n, c = x[0].shape[:2]
        dims = x[0].shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
        self.indexes = cuda.cupy.empty(y_shape, dtype=numpy.int32)

        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelForward.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x[0].reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (y, self.indexes)))

        return y,

    def backward(self, indexes, gy):
        return MaxPoolingNDGrad(self).apply(gy)

    def create_pool_desc(self):
        # cuDNN pooling descriptor for the MAX pooling mode.
        return cuda.cudnn.create_pooling_descriptor(
            self.ksize, self.stride, self.pad,
            cuda.cuda.cudnn.CUDNN_POOLING_MAX)
class MaxPoolingNDGrad(function_node.FunctionNode):
    """Gradient of :class:`MaxPoolingND`.

    Routes each upstream gradient element back to the input position
    that won the forward max, using the argmax indexes recorded by the
    forward pass (or cuDNN's own backward when cuDNN was used).
    """

    def __init__(self, mpoolnd):
        # Copy forward-pass configuration and recorded state.
        self.ndim = mpoolnd.ndim
        self.ksize = mpoolnd.ksize
        self.stride = mpoolnd.stride
        self.pad = mpoolnd.pad
        self.cover_all = mpoolnd.cover_all
        self._used_cudnn = mpoolnd._used_cudnn
        if not self._used_cudnn:
            # Only the non-cuDNN path records argmax indexes and input shape.
            self.indexes = mpoolnd.indexes
            self._in_shape = mpoolnd._in_shape
            self._in_dtype = mpoolnd._in_dtype
        self.mpoolnd = mpoolnd

    def forward_cpu(self, gy):
        ndim = self.ndim
        n, c = gy[0].shape[:2]
        outs = gy[0].shape[2:]
        dims = self._in_shape[2:]
        prod_outs = functools.reduce(mul, outs)
        prod_ksize = functools.reduce(mul, self.ksize)

        # Scatter gy into a flat column buffer at the argmax positions.
        gcol = numpy.zeros(
            n * c * prod_outs * prod_ksize, dtype=self._in_dtype)

        indexes = self.indexes.flatten()
        # Offset each per-window argmax into the flat buffer
        # (one prod_ksize-sized slot per output element). In-place on the copy.
        indexes += numpy.arange(0, indexes.size * prod_ksize, prod_ksize)

        gcol[indexes] = gy[0].ravel()
        gcol_shape = (n, c) + outs + self.ksize
        gcol = gcol.reshape(gcol_shape)
        # Move kernel axes in front of output axes: col2im expects
        # (n, c, k_1, ..., k_N, out_1, ..., out_N).
        for i in six.moves.range(ndim):
            gcol = numpy.swapaxes(gcol, 2 + i, ndim + 2 + i)

        gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, dims)
        return gx,

    def forward_gpu(self, gy):
        if self._used_cudnn:
            # Delegate to cuDNN's pooling backward with the retained input.
            x, = self.mpoolnd.get_retained_inputs()
            return self.mpoolnd.backward_gpu((x.data,), gy)

        n, c = self._in_shape[:2]
        dims = self._in_shape[2:]
        ys = gy[0].shape[2:]
        gx = cuda.cupy.empty(self._in_shape, self._in_dtype)

        ndim = self.ndim
        in_params, out_params, operation, name = \
            max_pooling_nd_kernel.MaxPoolingNDKernelBackward.generate(ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            gy[0].reduced_view(), self.indexes.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad + (gx,)))

        return gx,

    def backward(self, indexes, ggx):
        # Second-order gradient: forward the perturbation through the same
        # argmax selection.
        return MaxPoolingNDWithIndexes(self.mpoolnd).apply(ggx)
class MaxPoolingNDWithIndexes(function_node.FunctionNode):
    """Select input elements using precomputed max-pooling argmax indexes.

    Used for the double-backward of :class:`MaxPoolingND`: instead of
    taking a fresh max, it picks the element at the index recorded by
    the original forward pass (or recomputes the argmax on the cuDNN
    path, where no indexes were stored).
    """

    def __init__(self, mpoolnd):
        # Copy forward-pass configuration and recorded state.
        self.ndim = mpoolnd.ndim
        self.ksize = mpoolnd.ksize
        self.stride = mpoolnd.stride
        self.pad = mpoolnd.pad
        self.cover_all = mpoolnd.cover_all
        self._used_cudnn = mpoolnd._used_cudnn
        if not self._used_cudnn:
            self.indexes = mpoolnd.indexes
        else:
            # cuDNN kept no indexes; keep the node to recompute them later.
            self.mpoolnd = mpoolnd

    def forward_cpu(self, x):
        # Same im2col layout as MaxPoolingND.forward_cpu; -inf padding keeps
        # padded positions out of contention.
        col = conv_nd.im2col_nd_cpu(
            x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
            cover_all=self.cover_all)
        n, c = col.shape[:2]
        mid = (len(col.shape) - 2) // 2 + 2
        ksize = col.shape[2:mid]
        outs = col.shape[mid:]
        # (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
        ksize_total = functools.reduce(mul, ksize)
        col_shape = (n, c) + (ksize_total,) + outs
        col = col.reshape(col_shape)

        # (n, c, out_1, ..., out_N, k_1 * .. * k_N)
        col_indexes = (0, 1) + tuple(six.moves.range(3, 3 + self.ndim)) + (2,)
        col = col.transpose(col_indexes)
        col = col.reshape(-1, ksize_total)

        # Fancy-index one element per output position using the stored argmax.
        indexes = self.indexes.ravel()
        col = col[numpy.arange(len(indexes)), indexes]
        return col.reshape((n, c) + outs),

    def forward_gpu(self, inputs):
        if self._used_cudnn:
            # No stored indexes on the cuDNN path: recompute from the
            # retained forward input.
            x, = self.mpoolnd.get_retained_inputs()
            return self._forward_gpu_compute_indexes_again((x.data, inputs[0]))
        x, = inputs
        self._in_shape = x.shape
        self._in_dtype = x.dtype

        n, c = x.shape[:2]
        dims = x.shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)

        cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes
        in_params, out_params, operation, name = cls.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (self.indexes.reduced_view(), y)))
        return y,

    def _forward_gpu_compute_indexes_again(self, inputs):
        # Recompute the argmax from x and immediately gather from ggx.
        x, ggx = inputs
        self._in_shape = x.shape
        self._in_dtype = x.dtype

        n, c = x.shape[:2]
        dims = x.shape[2:]
        ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
                   for (d, k, s, p) in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)

        cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes1
        in_params, out_params, operation, name = cls.generate(self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(dims + ys + self.ksize + self.stride + self.pad +
              (ggx.reduced_view(), y)))
        return y,
def max_pooling_nd(x, ksize, stride=None, pad=0, cover_all=True):
    """N-dimensionally spatial max pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    This function provides a N-dimensionally generalized version of
    :func:`~functions.max_pooling_2d`. This acts similarly to
    :class:`~functions.ConvolutionND`, but it computes the maximum of input
    spatial patch for each channel without any parameter instead of computing
    the inner products.

    Args:
        x (~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
            ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints or None): Stride of pooling applications.
            ``stride=s`` and ``stride=(s,s, ..., s)`` are equivalent. If
            ``None`` is specified, then it uses same stride as the pooling
            window size.
        pad (int or tuple of ints): Spatial padding width for the input array.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are pooled into
            some output pixels. It may make the output size larger.

    Returns:
        ~chainer.Variable: Output variable.

    """
    # Spatial rank = everything after the (batch, channel) axes.
    spatial_ndim = len(x.shape[2:])
    func = MaxPoolingND(spatial_ndim, ksize, stride, pad, cover_all)
    outputs = func.apply((x,))
    return outputs[0]
| |
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
    """ Build the default treeprocessors for Markdown. """
    # Registration order matters: inline patterns run before prettification.
    processors = odict.OrderedDict()
    for name, cls in (("inline", InlineProcessor),
                      ("prettify", PrettifyTreeprocessor)):
        processors[name] = cls(md_instance)
    return processors
def isString(s):
    """ Check if it's string """
    # AtomicStrings are deliberately excluded: they must not be reprocessed.
    if isinstance(s, util.AtomicString):
        return False
    return isinstance(s, util.string_type)
class Treeprocessor(util.Processor):
    """
    Treeprocessors are run on the ElementTree object before serialization.

    Each Treeprocessor implements a "run" method that takes a pointer to an
    ElementTree, modifies it as necessary and returns an ElementTree
    object.

    Treeprocessors must extend markdown.Treeprocessor.

    """
    def run(self, root):
        """
        Subclasses of Treeprocessor should implement a `run` method, which
        takes a root ElementTree. This method can return another ElementTree
        object, and the existing root ElementTree will be replaced, or it can
        modify the current tree and return None.
        """
        pass  # pragma: no cover
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Matched inline patterns are first replaced by unique placeholder strings
    (the matched nodes being kept in ``self.stashed_nodes``); in a second
    phase the placeholders are swapped back for the stashed ElementTree
    nodes. ``self.stashed_nodes`` is per-``run`` mutable state shared by all
    private helpers below.
    """

    def __init__(self, md):
        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = util.ETX
        # 4 = width of the zero-padded id ("%04d") embedded in a placeholder.
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
        self.markdown = md
        self.inlinePatterns = md.inlinePatterns

    def __makePlaceholder(self, type):
        """ Generate a placeholder """
        # The id is the current stash size, so ids are assigned sequentially.
        id = "%04d" % len(self.stashed_nodes)
        hash = util.INLINE_PLACEHOLDER % id
        return hash, id

    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index

        Keyword arguments:

        * data: string
        * index: index, from which we start search

        Returns: placeholder id and string index, after the found placeholder.

        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            # No full placeholder found; advance one char past `index`.
            return None, index + 1

    def __stashNode(self, node, type):
        """ Add node to stash """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder

    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders

        Keyword arguments:

        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with

        Returns: String with placeholders.

        """
        # AtomicStrings are passed through untouched by design.
        if not isinstance(data, util.AtomicString):
            startIndex = 0
            # Re-try the same pattern after a match (it may match again
            # further along); only move to the next pattern on a miss.
            while patternIndex < len(self.inlinePatterns):
                data, matched, startIndex = self.__applyPattern(
                    self.inlinePatterns.value_for_index(patternIndex),
                    data, patternIndex, startIndex)
                if not matched:
                    patternIndex += 1
        return data

    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.

        Keywords arguments:

        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail

        Returns: None

        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None

        childResult = self.__processPlaceholders(text, subnode)

        if not isText and node is not subnode:
            # Tail content belongs after `subnode` in the parent; replace
            # subnode with the expanded nodes at its original position.
            pos = list(node).index(subnode)
            node.remove(subnode)
        else:
            pos = 0

        # Reverse so repeated insert(pos, ...) preserves original order.
        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild)

    def __processPlaceholders(self, data, parent):
        """
        Process string with placeholders and generate ElementTree tree.

        Keyword arguments:

        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data

        Returns: list with ElementTree elements with applied inline patterns.

        """
        def linkText(text):
            # Attach plain text either to the tail of the last emitted
            # element, or to the parent's text if nothing was emitted yet.
            if text:
                if result:
                    if result[-1].tail:
                        result[-1].tail += text
                    else:
                        result[-1].tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text

        result = []
        strartIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, strartIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)

                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)

                    if index > 0:
                        # Emit the literal text preceding the placeholder.
                        text = data[strartIndex:index]
                        linkText(text)

                    if not isString(node):  # it's Element
                        # Recurse into the stashed element's own text/tails,
                        # which may themselves contain placeholders.
                        for child in [node] + list(node):
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(node, child,False)
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else:  # it's just a string
                        linkText(node)
                        strartIndex = phEndIndex
                        continue

                    strartIndex = phEndIndex
                    result.append(node)

                else:  # wrong placeholder
                    # Prefix looked like a placeholder but had no valid id:
                    # treat the prefix as literal text and keep scanning.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[strartIndex:end])
                    strartIndex = end
            else:
                text = data[strartIndex:]
                if isinstance(data, util.AtomicString):
                    # We don't want to loose the AtomicString
                    text = util.AtomicString(text)
                linkText(text)
                data = ""

        return result

    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.

        Keyword arguments:

        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we start searching

        Returns: String with placeholders instead of ElementTree elements.

        """
        match = pattern.getCompiledRegExp().match(data[startIndex:])
        leftData = data[:startIndex]

        if not match:
            return data, False, 0

        node = pattern.handleMatch(match)

        if node is None:
            # Pattern matched but produced nothing; resume scanning just
            # past the last capturing group of the match.
            return data, True, len(leftData)+match.span(len(match.groups()))[0]

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                for child in [node] + list(node):
                    if not isString(node):
                        if child.text:
                            # Child text: continue with the NEXT pattern to
                            # avoid re-matching the same pattern on its output.
                            child.text = self.__handleInline(child.text,
                                                            patternIndex + 1)
                        if child.tail:
                            child.tail = self.__handleInline(child.tail,
                                                            patternIndex)

        placeholder = self.__stashNode(node, pattern.type())

        return "%s%s%s%s" % (leftData,
                             match.group(1),
                             placeholder, match.groups()[-1]), True, 0

    def run(self, tree):
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree. If you don't
        want to process your data with inline paterns, instead of normal
        string, use subclass AtomicString:

            node.text = markdown.AtomicString("This will not be processed.")

        Arguments:

        * tree: ElementTree object, representing Markdown tree.

        Returns: ElementTree object with applied inline patterns.

        """
        self.stashed_nodes = {}

        # Depth-first traversal via an explicit stack; children produced by
        # placeholder expansion are pushed and visited too.
        stack = [tree]

        while stack:
            currElement = stack.pop()
            insertQueue = []
            for child in currElement:
                if child.text and not isinstance(child.text, util.AtomicString):
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(self.__handleInline(
                        text), child)
                    stack += lst
                    insertQueue.append((child, lst))
                if child.tail:
                    # Tails are expanded through a dummy element so the
                    # resulting siblings can be spliced in after `child`.
                    tail = self.__handleInline(child.tail)
                    dumby = util.etree.Element('d')
                    tailResult = self.__processPlaceholders(tail, dumby)
                    if dumby.text:
                        child.tail = dumby.text
                    else:
                        child.tail = None
                    pos = list(currElement).index(child) + 1
                    tailResult.reverse()
                    for newChild in tailResult:
                        currElement.insert(pos, newChild)
                if len(child):
                    stack.append(child)

            # Splice the expanded nodes into the tree, applying attribute
            # syntax (e.g. {@id=foo}) where enabled.
            for element, lst in insertQueue:
                if self.markdown.enable_attributes:
                    if element.text and isString(element.text):
                        element.text = \
                            inlinepatterns.handleAttributes(element.text,
                                                            element)
                i = 0
                for newChild in lst:
                    if self.markdown.enable_attributes:
                        # Processing attributes
                        if newChild.tail and isString(newChild.tail):
                            newChild.tail = \
                                inlinepatterns.handleAttributes(newChild.tail,
                                                                element)
                        if newChild.text and isString(newChild.text):
                            newChild.text = \
                                inlinepatterns.handleAttributes(newChild.text,
                                                                newChild)
                    element.insert(i, newChild)
                    i += 1
        return tree
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """

        i = "\n"
        if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
            # Only pad the element's text when its first child is also
            # block-level; inline leading content keeps its spacing.
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and util.isBlockLevel(elem[0].tag):
                elem.text = i
            for e in elem:
                if util.isBlockLevel(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        # Non-block elements still get a trailing newline if their tail
        # is empty/whitespace.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """

        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        brs = root.getiterator('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
        # Clean up extra empty lines at end of code blocks.
        pres = root.getiterator('pre')
        for pre in pres:
            if len(pre) and pre[0].tag == 'code':
                pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
| |
import math, numpy, sklearn.metrics.pairwise as sk
from cvxopt import matrix, solvers
from svmutil import *
#from grid import *
import random, sys
class Model(object):
    # Instance-transfer SVM: training instances are re-weighted via Kernel
    # Mean Matching (KMM) so the weighted training distribution matches an
    # (unlabeled) test distribution, then a weighted libsvm model is fit.
    # NOTE: Python 2 code (print statements, xrange).

    def __init__(self):
        self.model = None               # trained libsvm model, set by train()
        self.sweight = 1.0              # weight of this model as a source model
        self.tweight = 1.0              # weight of this model on target data
        self.__trainLabelOrder = []     # class labels in order of first appearance

    """
    Compute instance (importance) weights using Kernel Mean Matching.
    Returns a list of instance weights for training data.
    """
    def __kmm(self, Xtrain, Xtest, sigma):
        n_tr = len(Xtrain)
        n_te = len(Xtest)

        #calculate Kernel
        print 'Computing kernel for training data ...'
        K_ns = sk.rbf_kernel(Xtrain, Xtrain, sigma)
        #make it symmetric
        K = 0.5*(K_ns + K_ns.transpose())

        #calculate kappa
        print 'Computing kernel for kappa ...'
        kappa_r = sk.rbf_kernel(Xtrain, Xtest, sigma)
        ones = numpy.ones(shape=(n_te, 1))
        kappa = numpy.dot(kappa_r, ones)
        kappa = -(float(n_tr)/float(n_te)) * kappa

        #calculate eps
        eps = (math.sqrt(n_tr) - 1)/math.sqrt(n_tr)

        #constraints: |sum(beta) - n_tr| <= n_tr*eps and 0 <= beta_i <= 200
        A0 = numpy.ones(shape=(1,n_tr))
        A1 = -numpy.ones(shape=(1,n_tr))
        A = numpy.vstack([A0, A1, -numpy.eye(n_tr), numpy.eye(n_tr)])
        b = numpy.array([[n_tr*(eps+1), n_tr*(eps-1)]])
        b = numpy.vstack([b.T, -numpy.zeros(shape=(n_tr,1)), numpy.ones(shape=(n_tr,1))* 200])

        print 'Solving quadratic program for beta ...'
        # Quadratic program: minimize 0.5*beta'K beta + kappa'beta s.t. G beta <= h
        P = matrix(K, tc='d')
        q = matrix(kappa, tc='d')
        G = matrix(A, tc='d')
        h = matrix(b, tc='d')
        beta = solvers.qp(P,q,G,h)
        return [i for i in beta['x']]

    """
    Build a SVM model.
    """
    def __build(self, trainX, trainY, beta, svmParam):
        # beta carries the per-instance KMM weights into libsvm.
        prob = svm_problem(beta, trainY, trainX)
        # param = svm_parameter('-s 0 -c 131072 -t 2 -q -b 1 -g 0.0001')
        param = svm_parameter('-s 0 -t 2 -q -b 1 -c ' + str(svmParam['c']) + ' -g ' + str(svmParam['g']))
        return svm_train(prob, param)

    # """
    # Compute distance between two
    # """
    # def __computeDistanceSq(self, d1, d2):
    #     dist = 0
    #     for i in d1:
    #         if i in d2:
    #             #when d1 and d2 have the same feature
    #             dist += ((d1[i] - d2[i]) ** 2)
    #         else:
    #             #feature in d1 only
    #             dist += (d1[i] ** 2)
    #     for i in d2:
    #         #feature in d2 only
    #         if i not in d1:
    #             dist += (d2[i] ** 2)
    #     return dist

    """
    Kernel width is the median of distances between instances of sparse data
    """
    def __computeKernelWidth(self, data):
        # O(n^2) pairwise Euclidean distances; median is the RBF width.
        dist = []
        for i in xrange(len(data)):
            for j in range(i+1, len(data)):
                # s = self.__computeDistanceSq(data[i], data[j])
                # dist.append(math.sqrt(s))
                dist.append(numpy.sqrt(numpy.sum((numpy.array(data[i]) - numpy.array(data[j])) ** 2)))
        return numpy.median(numpy.array(dist))

    """
    Initialize training of a new weighted SVM model by choosing best parameters.
    Sets the trained model for this object.
    """
    def train(self, traindata, testdata, maxvar):
        # traindata/testdata: lists of dicts mapping feature index -> value,
        # with the class label stored under key -1 (presumably; see test()).
        beta = []
        trainY = []
        trainX = []
        testX = []

        #SVM parameter selection (grid search disabled; fixed values used)
        # with open('train_svmpar.data', 'w') as f:
        #     for d in traindata:
        #         # if d[-1] not in self.__trainLabelOrder:
        #         #     self.__trainLabelOrder.append(d[-1])
        #         line = str(d[-1])
        #         for c in sorted(d):
        #             if c != -1:
        #                 line += ' ' + str(c) + ':' + str(d[c])
        #         f.write(line + '\n')
        # rate, svmParam = find_parameters('train_svmpar.data', '-log2c 1,100,10 -log2g -10,0,2 -gnuplot null -out null')
        svmParam = {'c':1000, 'g':0.1}

        #Subsample training data if given data size is more than 1000
        # NOTE(review): the threshold actually used is 200, and indices are
        # drawn from the hard-coded range [0, 2995] -- this raises IndexError
        # when len(traindata) < 2996; confirm the intended bounds.
        newtraindata = []
        if len(traindata) <= 200:
            newtraindata = traindata
        else:
            seen = []
            while len(seen)<200:
                r = random.randint(0, 2995)
                if r not in seen:
                    seen.append(r)
                    newtraindata.append(traindata[r])

        #Data preparation for computing beta.
        #Data format: space separated <index:value> with class index as -1.
        for d in newtraindata:
            if d[-1] not in self.__trainLabelOrder:
                self.__trainLabelOrder.append(d[-1])
            trainY.append(d[-1])
            covar = []
            for c in xrange(maxvar):
                if c in d:
                    covar.append(d[c])
                else:
                    covar.append(0.0)     # absent feature -> 0 (dense vector)
            trainX.append(covar)

        if testdata == None:
            # No target-domain data: uniform weights (plain unweighted SVM).
            for c in xrange(len(trainX)):
                beta.append(1.0)
        else:
            # gammab = 0.001
            gammab = self.__computeKernelWidth(trainX)
            for d in testdata:
                covar = []
                for c in xrange(maxvar):
                    if c in d:
                        covar.append(d[c])
                    else:
                        covar.append(0.0)
                testX.append(covar)
            beta = self.__kmm(trainX, testX, gammab)

        #Model training
        self.model = self.__build(trainX, trainY, beta, svmParam)

    """
    Test the weighted SVM to predict labels of a given test data.
    Returns the result of prediction, each of the form <label, probability, true label>
    """
    def test(self, testdata, maxvar):
        #Data preparation for model prediction
        #Data format: space separated <index:value> with class index as -1.
        testX = []
        testY = []
        for d in testdata:
            # if d[-1] not in self.__trainLabelOrder:
            #     self.__trainLabelOrder.append(d[-1])
            testY.append(d[-1])
            covar = []
            for c in xrange(maxvar):
                if c in d:
                    covar.append(d[c])
                else:
                    covar.append(0.0)
            testX.append(covar)

        #predict and gather results
        res = svm_predict(testY, testX, self.model, '-q -b 1') #returns <label, accuracy, value>
        result = []
        for i in xrange(len(res[0])):
            # Pick the probability column matching the predicted label.
            result.append([res[0][i], res[2][i][self.__trainLabelOrder.index(res[0][i])], testY[i]])
        return result

    """
    Compute weight of a source model using its error rate
    """
    def __computeWeight(self, errorRate):
        # AdaBoost-style weight; clamp errorRate=0 to 0.01 to avoid log(inf),
        # and return a small constant when worse than chance.
        if errorRate <= 0.5:
            if errorRate == 0:
                errorRate = 0.01
            return 0.5*math.log((1-errorRate)/errorRate)
        else:
            return 0.01

    """
    Set model weights using test prediction.
    For source weight, use error rate with known source data labels.
    For target weight, use confidence (or probability) measure on target data.
    """
    def computeModelWeight(self, data, isSource, maxvar):
        result = self.test(data, maxvar)
        if isSource:
            #for source weight
            err = 0
            for i in xrange(len(result)):
                if result[i][0] != data[i][-1]:
                    err += 1
            self.sweight = self.__computeWeight(float(err)/len(data))
        else:
            #for target weight: mean predicted probability
            conf = 0.0
            for r in result:
                conf += r[1]
            self.tweight = (conf/len(result))
#"""
#FOR TESTING
#"""
#if __name__ == '__main__':
# traindata = []
# testdata = []
# labels = []
# maxvar = 5
# for i in xrange(10):
# y = random.randint(0,2)
# x = {-1:y}
# for j in xrange(maxvar):
# x[j] = (random.randint(0,100))
#
# if y not in labels:
# labels.append(y)
# traindata.append(x)
#
# for i in xrange(5):
# y = random.randint(0,2)
# x = {-1:y}
# for j in xrange(maxvar):
# x[j] = (random.randint(0,100))
#
# testdata.append(x)
#
# model = Model()
# model.train(traindata,testdata, maxvar)
# model.test(testdata, maxvar)
# print labels
| |
"""codecontext - display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the maxlines
variable in the codecontext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import re
from sys import maxsize as INFINITY
import tkinter
from tkinter.constants import NSEW, SUNKEN
from idlelib.config import idleConf
# Keywords that open an indented block and therefore can appear as
# context ("hint") lines above the visible editor text.
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
                "if", "try", "while", "with", "async"}
def get_spaces_firstword(codeline, c=re.compile(r"^(\s*)(\w*)")):
    "Extract the beginning whitespace and first word from codeline."
    # The compiled pattern is bound as a default argument so compilation
    # happens exactly once, at function-definition time.
    match = c.match(codeline)
    return match.groups()
def get_line_info(codeline):
    """Return tuple of (line indent value, codeline, block start keyword).

    The indentation of empty lines (or comment lines) is INFINITY.
    If the line does not start a block, the keyword value is False.
    """
    whitespace, word = get_spaces_firstword(codeline)
    indent = len(whitespace)
    # Blank and comment-only lines sort above every real indent level.
    if indent == len(codeline) or codeline[indent] == '#':
        indent = INFINITY
    opener = word if word in BLOCKOPENERS else False
    return indent, codeline, opener
class CodeContext:
    "Display block context above the edit window."
    UPDATEINTERVAL = 100  # millisec

    def __init__(self, editwin):
        """Initialize settings for context block.

        editwin is the Editor window for the context block.
        self.text is the editor window text widget.

        self.context displays the code context text above the editor text.
          Initially None, it is toggled via <<toggle-code-context>>.
        self.topvisible is the number of the top text line displayed.
        self.info is a list of (line number, indent level, line text,
          block keyword) tuples for the block structure above topvisible.
          self.info[0] is initialized with a 'dummy' line which
          starts the toplevel 'block' of the module.

        self.t1 and self.t2 are two timer events on the editor text widget to
          monitor for changes to the context text or editor font.
        """
        self.editwin = editwin
        self.text = editwin.text
        self._reset()

    def _reset(self):
        # Return all state to the "context pane hidden" configuration.
        self.context = None
        self.cell00 = None
        self.t1 = None
        self.topvisible = 1
        self.info = [(0, -1, "", False)]

    @classmethod
    def reload(cls):
        "Load class variables from config."
        cls.context_depth = idleConf.GetOption("extensions", "CodeContext",
                                               "maxlines", type="int",
                                               default=15)

    def __del__(self):
        "Cancel scheduled events."
        if self.t1 is not None:
            try:
                self.text.after_cancel(self.t1)
            except tkinter.TclError:
                # The widget may already have been destroyed.
                pass
            self.t1 = None

    def toggle_code_context_event(self, event=None):
        """Toggle code context display.

        If self.context doesn't exist, create it to match the size of the editor
        window text (toggle on).  If it does exist, destroy it (toggle off).
        Return 'break' to complete the processing of the binding.
        """
        if self.context is None:
            # Calculate the border width and horizontal padding required to
            # align the context with the text in the main Text widget.
            #
            # All values are passed through getint(), since some
            # values may be pixel objects, which can't simply be added to ints.
            widgets = self.editwin.text, self.editwin.text_frame

            # Calculate the required horizontal padding and border width.
            padx = 0
            border = 0
            for widget in widgets:
                info = (widget.grid_info()
                        if widget is self.editwin.text
                        else widget.pack_info())
                padx += widget.tk.getint(info['padx'])
                padx += widget.tk.getint(widget.cget('padx'))
                border += widget.tk.getint(widget.cget('border'))
            self.context = tkinter.Text(
                self.editwin.text_frame,
                height=1,
                width=1,  # Don't request more than we get.
                highlightthickness=0,
                padx=padx, border=border, relief=SUNKEN, state='disabled')
            self.update_font()
            self.update_highlight_colors()
            self.context.bind('<ButtonRelease-1>', self.jumptoline)
            # Get the current context and initiate the recurring update event.
            self.timer_event()
            # Grid the context widget above the text widget.
            self.context.grid(row=0, column=1, sticky=NSEW)

            # Matching corner frame so the context row lines up with the
            # line-number column.
            line_number_colors = idleConf.GetHighlight(idleConf.CurrentTheme(),
                                                       'linenumber')
            self.cell00 = tkinter.Frame(self.editwin.text_frame,
                                        bg=line_number_colors['background'])
            self.cell00.grid(row=0, column=0, sticky=NSEW)
            menu_status = 'Hide'
        else:
            self.context.destroy()
            self.context = None
            self.cell00.destroy()
            self.cell00 = None
            self.text.after_cancel(self.t1)
            self._reset()
            menu_status = 'Show'
        self.editwin.update_menu_label(menu='options', index='* Code Context',
                                       label=f'{menu_status} Code Context')
        return "break"

    def get_context(self, new_topvisible, stopline=1, stopindent=0):
        """Return a list of block line tuples and the 'last' indent.

        The tuple fields are (linenum, indent, text, opener).
        The list represents header lines from new_topvisible back to
        stopline with successively shorter indents > stopindent.
        The list is returned ordered by line number.
        Last indent returned is the smallest indent observed.
        """
        assert stopline > 0
        lines = []
        # The indentation level we are currently in.
        lastindent = INFINITY
        # For a line to be interesting, it must begin with a block opening
        # keyword, and have less indentation than lastindent.
        for linenum in range(new_topvisible, stopline-1, -1):
            codeline = self.text.get(f'{linenum}.0', f'{linenum}.end')
            indent, text, opener = get_line_info(codeline)
            if indent < lastindent:
                lastindent = indent
                if opener in ("else", "elif"):
                    # Also show the if statement.
                    lastindent += 1
                if opener and linenum < new_topvisible and indent >= stopindent:
                    lines.append((linenum, indent, text, opener))
                if lastindent <= stopindent:
                    break
        lines.reverse()
        return lines, lastindent

    def update_code_context(self):
        """Update context information and lines visible in the context pane.

        No update is done if the text hasn't been scrolled.  If the text
        was scrolled, the lines that should be shown in the context will
        be retrieved and the context area will be updated with the code,
        up to the number of maxlines.
        """
        new_topvisible = self.editwin.getlineno("@0,0")
        if self.topvisible == new_topvisible:      # Haven't scrolled.
            return
        if self.topvisible < new_topvisible:       # Scroll down.
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.topvisible)
            # Retain only context info applicable to the region
            # between topvisible and new_topvisible.
            while self.info[-1][1] >= lastindent:
                del self.info[-1]
        else:  # self.topvisible > new_topvisible: # Scroll up.
            stopindent = self.info[-1][1] + 1
            # Retain only context info associated
            # with lines above new_topvisible.
            while self.info[-1][0] >= new_topvisible:
                stopindent = self.info[-1][1]
                del self.info[-1]
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.info[-1][0]+1,
                                                 stopindent)
        self.info.extend(lines)
        self.topvisible = new_topvisible
        # Last context_depth context lines.
        context_strings = [x[2] for x in self.info[-self.context_depth:]]
        # Hide the dummy first entry when it is the empty toplevel line.
        showfirst = 0 if context_strings[0] else 1
        # Update widget.
        self.context['height'] = len(context_strings) - showfirst
        self.context['state'] = 'normal'
        self.context.delete('1.0', 'end')
        self.context.insert('end', '\n'.join(context_strings[showfirst:]))
        self.context['state'] = 'disabled'

    def jumptoline(self, event=None):
        "Show clicked context line at top of editor."
        lines = len(self.info)
        if lines == 1:  # No context lines are showing.
            newtop = 1
        else:
            # Line number clicked.
            contextline = int(float(self.context.index('insert')))
            # Lines not displayed due to maxlines.
            offset = max(1, lines - self.context_depth) - 1
            newtop = self.info[offset + contextline][0]
        self.text.yview(f'{newtop}.0')
        self.update_code_context()

    def timer_event(self):
        "Event on editor text widget triggered every UPDATEINTERVAL ms."
        if self.context is not None:
            self.update_code_context()
        self.t1 = self.text.after(self.UPDATEINTERVAL, self.timer_event)

    def update_font(self):
        # Keep the context pane's font in sync with the editor font.
        if self.context is not None:
            font = idleConf.GetFont(self.text, 'main', 'EditorWindow')
            self.context['font'] = font

    def update_highlight_colors(self):
        # Apply the current theme's 'context' and 'linenumber' colors.
        if self.context is not None:
            colors = idleConf.GetHighlight(idleConf.CurrentTheme(), 'context')
            self.context['background'] = colors['background']
            self.context['foreground'] = colors['foreground']

        if self.cell00 is not None:
            line_number_colors = idleConf.GetHighlight(idleConf.CurrentTheme(),
                                                       'linenumber')
            self.cell00.config(bg=line_number_colors['background'])
# Load configurable options from the user's config at import time.
CodeContext.reload()


if __name__ == "__main__":
    from unittest import main
    main('idlelib.idle_test.test_codecontext', verbosity=2, exit=False)

    # Add htest.
| |
__all__ = ["Pattern"]
from ..utils import isfunction
import lasagne.objectives
from lasagne.layers import get_all_layers
from lasagne.layers import InputLayer
from lasagne.layers import Layer
import theano.tensor as T
import numpy as np
import itertools
from collections import OrderedDict
import inspect
import copy
import os
import tarfile
from warnings import warn
class Pattern(object):
"""
The :class:`Pattern` class represents a side information pattern and
should be subclassed when implementing a new pattern.
It is similar to :class:`lasagne.layers.Layer` and mimics some of
its functionality, but does not inherit from it.
*How to implement your own pattern?*
A minimal example should implement the following functions:
- get_side_objective
- default_target_objective
- default_side_objective
- default_beta_input
- default_beta_output_shape
See the docstrings for each function to understand what it should do.
Depending on the pattern, you also might need to override (usually if you
need additional side variables):
- training_input_vars
- side_vars
    If your beta has multiple inputs, you will need to implement:
- get_beta_output_for
Optionally, if your side variable is a supervised learning target, then you
should return the theano variable representing this target in the method:
- side_target_var(self)
Parameters
----------
phi : lasagne layer
a lasagne layer for computing the intermediate representation
        :math:`\phi(x)=s` from the input x
psi : lasagne layer
a lasagne layer for computing the prediction of the target
from the intermediate representation s, :math:`\psi(s)=y`
target_var : theano tensor variable
Theano variable representing the target. Required for formulating the target loss.
side_var: theano tensor variable
Theano variable representing the side information.
The semantics of this variable depend on the pattern.
Note that additional side variables might be required by a pattern.
input_shape : int or tuple
Shape of the input variable
target_shape : int or tuple
Shape of the target variable
side_shape : int or tuple
Shape of the side information variable
representation_shape : int or tuple
Shape of the intermediate representation to be learned
(for some patterns that may coincide with the side_shape)
target_loss: theano tensor variable, optional
Function (e.g. lasagne objective) for the optimizing the target.
All patterns have standard objectives applicable here
side_loss: theano tensor variable, optional
Theano expression or lasagne objective for the side loss.
Most patterns have standard objectives applicable here.
name : string, optional
An optional name to attach to this layer.
"""
PHI_OUTPUT_SHAPE='PHI_OUTPUT_SHAPE'
PSI_OUTPUT_SHAPE='PSI_OUTPUT_SHAPE'
BETA_OUTPUT_SHAPE='BETA_OUTPUT_SHAPE'
    def __init__(self,
                 phi, psi, beta=None,
                 input_var=None, target_var=None, side_var=None,
                 input_shape=None, target_shape=None, side_shape=None,
                 representation_shape=None,
                 target_loss=None, side_loss=None,
                 name=None):
        # Store the pattern's functions and theano variables.
        self.phi = phi
        self.psi = psi
        self.beta = beta

        self.input_var = input_var
        self.target_var = target_var
        self.side_var = side_var

        self.input_shape = input_shape
        self.target_shape = target_shape
        self.side_shape = side_shape
        self.representation_shape = representation_shape

        self.target_loss = target_loss
        self.target_loss_fn = None
        self.side_loss = side_loss
        self.side_loss_fn = None

        self.name = name

        self.input_layer = None
        self.get_output_kwargs = []

        # Losses may be given as python functions (preferred) or as
        # prebuilt theano expressions (deprecated).
        if isfunction(self.target_loss):
            self.target_loss_fn = self.target_loss
            self.target_loss = None
        elif self.target_loss is not None:
            warn("target_loss: passing something different than a python function object "
                 "to the constructor of a Pattern is deprecated. "
                 "Recommended way is to use a function from lasagne.objectives "
                 "or equivalent." )

        if isfunction(self.side_loss):
            self.side_loss_fn = self.side_loss
            self.side_loss = None
        elif self.side_loss is not None:
            warn("side_loss: passing something different than a python function object "
                 "to the constructor of a Pattern is deprecated. "
                 "Recommended way is to use a function from lasagne.objectives "
                 "or equivalent." )

        # convert phi, psi and beta to real lasagne layers if they
        # are passed as a list/dictionary (nolearn-style description)
        if isinstance(phi, list) or isinstance(phi, tuple):
            # if no input layer in list -> build it
            assert (input_var is not None)
            phi = copy.deepcopy(phi)
            self.phi = \
                self._initialize_function('phi', phi, self.default_phi_input,
                                          self.PHI_OUTPUT_SHAPE,
                                          self.representation_shape)
            self.input_layer = lasagne.layers.get_all_layers(self.phi)[0]
        else:
            # extract input layer and variable from the given phi
            self.input_layer = lasagne.layers.get_all_layers(self.phi)[0]
            self.input_var = self.input_layer.input_var
            self.phi._fun_name = "phi"

        if isinstance(psi, list) or isinstance(psi, tuple):
            # if no input layer in list -> build it
            psi = copy.deepcopy(psi)
            self.psi = \
                self._initialize_function('psi', psi, self.default_psi_input,
                                          self.PSI_OUTPUT_SHAPE,
                                          self.target_shape)
            self.psi._fun_name = "psi"

        # NOTE(review): `and` binds tighter than `or` here; the result is
        # still correct because isinstance(None, tuple) is False.
        if beta is not None and isinstance(beta, list) or isinstance(beta, tuple):
            # if no input layer in list -> build it
            beta = copy.deepcopy(beta)
            try:
                self.beta = \
                    self._initialize_function('beta', beta, self.default_beta_input,
                                              self.BETA_OUTPUT_SHAPE,
                                              self.default_beta_output_shape
                                              )
                self.beta._fun_name = "beta"
            except ValueError as e:
                raise Exception("Could not replace BETA_OUTPUT_SHAPE marker --"
                                " is the value returned by self.default_beta_output_shape"
                                " valid? (not None)\n"
                                " Futher hints: " + str(e))

        # tag the parameters of each function with the name of the function
        for fun, fun_name in zip([self.phi, self.psi, self.beta], ['phi', 'psi', 'beta']):
            self._tag_function_parameters(fun, fun_name)

        self._create_target_objective()
        self._create_side_objective()
def get_side_objective(self, input, target):
"""
Pattern-specific function to get the theano expression of the side objective.
Must be implemented by each pattern.
"""
raise NotImplementedError()
@property
def training_input_vars(self):
"""Return the theano variables that are required for training.
Usually this will correspond to
(input_var, target_var, side_var)
which is also the default.
Order matters!
Returns
-------
tuple of theano tensor variables
"""
return (self.input_var, self.target_var, self.side_var)
@property
def side_input_vars(self):
"""Return the theano input variables for validating the side loss.
Per default we assume that it is all training variables except for the
target variable (see :method:`Pattern.training_input_vars`) and the
optional side target variable (see :method:`Pattern.side_target_var`).
You can override this method in your pattern.
Order matters!
Returns
-------
tuple of theano tensor variables
"""
excluded_vars = [self.target_var]
if self.side_target_var is not None:
excluded_vars.append(self.side_target_var)
return tuple([i for i in self.training_input_vars if i not in excluded_vars])
@property
def side_target_var(self):
"""Return the theano target variable required for validating
the side information (optional).
This returns None per default.
Override it the side loss of the pattern is a supervised loss, and one of the
side variables is the supervised (side) target - and then return this variable.
Also see :method:`Pattern.side_input_vars`
Returns
-------
theano tensor variable
"""
return None
@property
def side_vars(self):
"""Return the theano variables that are required for training.
Usually this will correspond to
(input_var, target_var, side_var)
which is also the default.
Order matters!
Returns
-------
tuple of theano tensor variables
"""
return (self.side_var, )
@property
def default_target_objective(self):
""" Return the default target objective used by this pattern.
(implementation required)
The target objective can be overridden by passing the
target_loss argument to the constructor of a pattern
Returns
-------
theano expression
"""
raise NotImplementedError()
@property
def default_side_objective(self):
""" Return the default side objective used by this pattern.
(implementation required)
The side objective can be overridden by passing the
side_loss argument to the constructor of a pattern
Returns
-------
theano expression
"""
raise NotImplementedError()
@property
def default_phi_input(self):
""" Specifies the default input to the function :math:`\phi` in this pattern
(implementation required)
This may either return a tuple of lasagne layer class and a
dictionary containing the params for instantiation of a layer,
or it contains a lasagne layer object
Per default, this will create/return an input_layer with self.input_var
of dimensionality self.input_shape
-------
Returns:
tuple of lasagne layer class and dictionary, or lasagne layer instance
"""
if self.input_layer is None:
# create input layer
#print ("Creating input layer for phi")
input_dim = self.input_shape
if isinstance(self.input_shape, int):
input_dim = (None, self.input_shape)
self.input_layer = lasagne.layers.InputLayer(shape=input_dim,
input_var=self.input_var, name="input")
return self.input_layer
@property
def default_psi_input(self):
""" Specifies the default input to the function :math:`\psi` in this pattern
This may either return a tuple of lasagne layer class and a
dictionary containing the params for instantiation of a layer,
or it contains a lasagne layer object
Per default, this will return the output of :math:`\phi`.
-------
Returns:
tuple of lasagne layer class and dictionary, or lasagne layer instance
"""
return self.phi
@property
def default_beta_input(self):
""" Specifies the default input to the function :math:`\beta` in this pattern
This may either return a tuple of lasagne layer class and a
dictionary containing the params for instantiation of a layer,
or it contains a lasagne layer object
-------
Returns:
tuple of lasagne layer class and dictionary, or lasagne layer instance
"""
raise NotImplementedError()
@property
def default_beta_output_shape(self):
"""Every pattern that uses an auxiliary function beta should
implement this method which computes the shape.
This is helpful for automatically building beta in nolearn style
function parameterization
--------
Returns:
int or tuple of ints
"""
raise NotImplementedError()
def _get_all_function_layers(self, fun):
"""
Get only the layers that belong to a certain function
"""
layers = []
for l in lasagne.layers.get_all_layers(fun):
if l._pattern_function == fun._fun_name:
layers.append(l)
return layers
def _tag_function_parameters(self, fun, fun_name):
"""
Helper function to add the tag `fun_name` (encoding the function name,
e.g. phi or psi) to the function `fun`
"""
for l in lasagne.layers.get_all_layers(fun):
params = l.get_params()
for p in params:
if fun_name != 'phi' and 'phi' in l.params[p]:
# print ("omitting phi for %s" % str(p))
continue
# print ("adding %s to param %s" % (fun_name, str(p)))
l.params[p].add(fun_name)
# print (" tags: " + str(l.params[p]))
def _get_params_for(self, name):
"""This method has been adapted from the NeuralFit class in nolearn.
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
Copyright (c) 2012-2015 Daniel Nouri"""
collected = {}
prefix = '{}_'.format(name)
params = vars(self)
more_params = self.more_params
for key, value in itertools.chain(params.items(), more_params.items()):
if key.startswith(prefix):
collected[key[len(prefix):]] = value
return collected
def _layer_name(self, layer_class, index):
"""This method has been adapted from the NeuralFit class in nolearn.
https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
Copyright (c) 2012-2015 Daniel Nouri"""
return "{}{}".format(
layer_class.__name__.lower().replace("layer", ""), index)
    def _initialize_function(self, fun_name, layers, input_layer_tuple,
                             output_shape_marker, output_shape):
        """Function to build phi, psi and beta automatically from a
        nolearn style network-as-list description.

        Returns the output layer of the built function; every created layer
        is tagged with ``_pattern_function = fun_name``.

        This method has been adapted from the NeuralFit class in nolearn.
        https://github.com/dnouri/nolearn/blob/master/nolearn/lasagne/base.py
        Copyright (c) 2012-2015 Daniel Nouri"""

        class Layers(OrderedDict):
            # Ordered layer registry addressable by name, by integer
            # position, or by slice.
            def __getitem__(self, key):
                if isinstance(key, int):
                    return list(self.values()).__getitem__(key)
                elif isinstance(key, slice):
                    items = list(self.items()).__getitem__(key)
                    return Layers(items)
                else:
                    return super(Layers, self).__getitem__(key)

            def keys(self):
                return list(super(Layers, self).keys())

            def values(self):
                return list(super(Layers, self).values())

        #self.__dict__[fun_name] = Layers()
        #fun_ = self.__dict__[fun_name]
        fun_ = Layers()

        # check if layers contains input layer; if not, create one
        user_input_layer = None
        for i, layer_def in enumerate(layers):
            if isinstance(layer_def[0], str):
                # The legacy format: ('name', Layer)
                layer_name, layer_factory = layer_def
                layer_kw = {'name': layer_name}
            else:
                # New format: (Layer, {'layer': 'kwargs'})
                layer_factory, layer_kw = layer_def
                layer_kw = layer_kw.copy()
            if issubclass(layer_factory, InputLayer):
                user_input_layer = layer_factory
                break

        # The pattern supplies either an input layer spec or the output of
        # another function as the expected input for this function.
        if isinstance(input_layer_tuple, list) or isinstance(input_layer_tuple, tuple):
            input_layer, input_layer_params = input_layer_tuple
        else:
            input_layer, input_layer_params = input_layer_tuple, None

        if (inspect.isclass(input_layer) and issubclass(input_layer, InputLayer))\
                or isinstance(input_layer, InputLayer):
            if user_input_layer is not None:
                # TODO check that the user provided input layer is compatible
                # with the one that the pattern expects
                # ok - we stick to the users input layer
                pass
            else:
                # push the input layer into the dictionary
                layers.insert(0, (input_layer, input_layer_params))
        else:  # input_layer is output of another function
            if user_input_layer is not None:
                # the user has provided an input layer. ignore it because
                # we use the functional input layer from the patern
                raise Exception("You have provided an input layer for %s,"
                                " but the pattern requires the input %s" % (fun_name, str(input_layer)))
            else:
                # push the input layer into the dictionary
                layers.insert(0, (input_layer, input_layer_params))

        # iterate through layers
        if isinstance(layers[0], Layer):
            # 'layers[0]' is already the output layer with type
            # 'lasagne.layers.Layer', so we only have to fill
            # 'fun_' and we're done:
            for i, layer in enumerate(get_all_layers(layers[0])):
                name = layer.name or self._layer_name(layer.__class__, i)
                fun_[name] = layer
                if self._get_params_for(name) != {}:
                    raise ValueError(
                        "You can't use keyword params when passing a Lasagne "
                        "instance object as the 'layers' parameter of "
                        "'Pattern'."
                        )
            return layers[0]

        # 'layers' are a list of '(Layer class, kwargs)', so
        # we'll have to actually instantiate the layers given the
        # arguments:
        layer = None
        for i, layer_def in enumerate(layers):
            if isinstance(layer_def[0], str):
                # The legacy format: ('name', Layer)
                layer_name, layer_factory = layer_def
                layer_kw = {'name': layer_name}
            else:
                # New format: (Layer, {'layer': 'kwargs'})
                layer_factory, layer_kw = layer_def
                if layer_kw is not None:
                    layer_kw = layer_kw.copy()

            layer_is_instance = False
            if layer_kw is None:
                # the passed object is a an expression or an object instance.
                # hence we don't have to build it later
                layer_is_instance = True
                layer_kw = {'name': layer_factory.name}

            if 'name' not in layer_kw:
                layer_kw['name'] = fun_name + "_" + self._layer_name(layer_factory, i)

            #more_params = self._get_params_for(layer_kw['name'])
            #layer_kw.update(more_params)

            if layer_kw['name'] in fun_:
                raise ValueError(
                    "Two layers with name {}.".format(layer_kw['name']))

            # Any layers that aren't subclasses of InputLayer are
            # assumed to require an 'incoming' paramter. By default,
            # we'll use the previous layer as input:
            if not layer_is_instance and not issubclass(layer_factory, InputLayer):
                if 'incoming' in layer_kw:
                    layer_kw['incoming'] = fun_[
                        layer_kw['incoming']]
                elif 'incomings' in layer_kw:
                    layer_kw['incomings'] = [
                        fun_[nm] for nm in layer_kw['incomings']]
                else:
                    layer_kw['incoming'] = layer

            # Resolve string references to shared weights/biases of
            # earlier layers.
            for attr in ('W', 'b'):
                if isinstance(layer_kw.get(attr), str):
                    name = layer_kw[attr]
                    layer_kw[attr] = getattr(fun_[name], attr, None)

            # Substitute the pattern's output-shape marker with the
            # concrete shape supplied by the caller.
            for k, v in layer_kw.items():
                if v == output_shape_marker:
                    #print ("%s triggered -> %s" % (output_shape_marker, str(output_shape)))
                    if output_shape is None:
                        raise ValueError("Cannot automatically set output shape (is None)"
                                         " for %s - did you set all required shape variables"
                                         " in the constructor of the pattern?"
                                         " (marker was: %s)" % (fun_name, output_shape_marker))
                    layer_kw[k] = output_shape

            if layer_is_instance:
                layer = layer_factory
                layer_wrapper = None
            else:
                try:
                    layer_wrapper = layer_kw.pop('layer_wrapper', None)
                    layer = layer_factory(**layer_kw)
                except TypeError as e:
                    msg = ("Failed to instantiate {} with args {}.\n"
                           "Maybe parameter names have changed?".format(
                               layer_factory, layer_kw))
                    raise Exception(TypeError(msg), e)

            fun_[layer_kw['name']] = layer
            if layer_wrapper is not None:
                layer = layer_wrapper(layer)
                fun_["LW_%s" % layer_kw['name']] = layer
            layer._pattern_function = fun_name

        # we return the last layer as the representative of the function
        # as it's common in lasagne
        return layer
def _create_target_objective(self, output=None, target=None):
"""
Helper function to build the member variable target_loss.
"""
if output is None:
output = self.get_psi_output_for(self.input_var)
if target is None:
target = self.target_var
if self.target_loss is None:
assert (self.input_var is not None)
assert (self.target_var is not None)
if self.target_loss_fn is None:
fn = self.default_target_objective
else:
#print ("Target loss is function object: %s" % str(self.target_loss_fn))
fn = self.target_loss_fn
# special case: if we use the squared_error loss, but target_var is a vector
# (1 dim target) we flatten the prediction -- otherwise we get a theano error
if fn == lasagne.objectives.squared_error and \
target.type == T.dvector or target.type == T.dvector:
output = output.flatten()
# define target loss
self.target_loss = fn(output, target).mean()
# store the function, too (required by PatternTrainer)
self.target_loss_fn = fn
def _create_side_objective(self):
"""
Helper function to build the member variable side_loss.
"""
if self.side_loss is None:
assert (self.input_var is not None)
assert (self.side_var is not None)
if self.side_loss_fn is None:
# store the function, too (required by PatternTrainer)
self.side_loss_fn = self.default_side_objective
self.side_loss = self.get_side_objective(self.input_var, self.side_var)
    @property
    def output_shape(self):
        # Shape of the pattern's output for its own input.
        # NOTE(review): this passes the input *variable* where
        # get_output_shape_for documents a shape tuple -- confirm upstream.
        return self.get_output_shape_for(self.input_var)
def get_params(self, **tags):
"""
Returns a list of all the Theano variables that parameterize the
pattern.
By default, all parameters that participate in the forward pass will be
returned. The list can optionally be filtered by
specifying tags as keyword arguments. For example, ``trainable=True``
will only return trainable parameters, and ``regularizable=True``
will only return parameters that can be regularized (e.g., by L2
decay).
Parameters
----------
**tags (optional)
tags can be specified to filter the list. Specifying ``tag1=True``
will limit the list to parameters that are tagged with ``tag1``.
Specifying ``tag1=False`` will limit the list to parameters that
are not tagged with ``tag1``. Commonly used tags are
``regularizable`` and ``trainable``.
Returns
-------
list of Theano shared variables
A list of variables that parameterize the layer
Notes
-----
For patterns without any parameters, this will return an empty list.
"""
# check between tags that belong to the pattern and those that belong to the layers
params = lasagne.layers.get_all_params(self.psi, **tags)
if self.beta is not None:
params += lasagne.layers.get_all_params(self.beta, **tags)
params += lasagne.layers.get_all_params(self.phi, **tags)
return params
    def get_all_params(self, **tags):
        """Alias for get_params (mirrors lasagne's naming convention)."""
        return self.get_params(**tags)
def get_output_shape_for(self, input_shape):
"""
Computes the output shape of this layer, given an input shape.
Parameters
----------
input_shape : tuple
A tuple representing the shape of the input. The tuple should have
as many elements as there are input dimensions, and the elements
should be integers or `None`.
Returns
-------
tuple
A tuple representing the shape of the output of this layer. The
tuple has as many elements as there are output dimensions, and the
elements are all either integers or `None`.
Notes
-----
This method will typically be overridden when implementing a new
:class:`Layer` class. By default it simply returns the input
shape. This means that a layer that does not modify the shape
(e.g. because it applies an elementwise operation) does not need
to override this method.
"""
phi_output_shape = self.phi.get_output_shape_for(input_shape)
return self.psi.get_output_shape_for(phi_output_shape)
def get_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return self.get_psi_output_for(input, **kwargs)
def get_psi_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.psi, inputs=input, **kwargs)
def get_beta_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.beta, inputs=input, **kwargs)
def get_phi_output_for(self, input=None, **kwargs):
if input is None:
input = self.input_var
return lasagne.layers.get_output(self.phi, inputs=input, **kwargs)
def get_output_for_function(self, fun_or_fun_name, input, **kwargs):
"""
Get the output for a pattern subfunction (i.e. phi, psi, beta) by
setting the input to that subfunction manually.
The problem is that if you apply lasagne.layers.get_output to, e.g.,
psi, which gets phi as input, lasagne.layers.get_output will except
the 'input' to be the input of phi, not the input of psi.
Sometimes, it is desirable to set the input of psi manually.
Note, that this differs from the behavior of get_phi_output_for,
get_psi_output_for and get_beta_output_for, which expect the initial
input to the network (depending on the pattern, often phi's input).
"""
if type(fun_or_fun_name) == str:
fun = self.__dict__[fun_or_fun_name]
else:
fun = fun_or_fun_name
last_input = input
for l in self._get_all_function_layers(fun):
last_input = l.get_output_for(last_input, **kwargs)
return last_input
    def training_loss(self, target_weight=0.5, side_weight=0.5, all_losses=False):
        """
        Compute the sum of the target and side info loss. Returns a theano expression.

        If all_losses is true, additionally to the summed loss the individual (weighted)
        losses are returned, too.

        Parameters
        ----------
        target_weight : float
            target weight
        side_weight : float
            side weight
        all_losses : bool
            Default false, returns (loss) only.
            If true, returns tuple (loss, target_loss, side_loss)
        """
        # we need to gate because if we set one weight to 0., we might
        # also want to omit the involved theano variables; w/o the if-else
        # we get an "unconnected inputs" error in theano
        loss = 0.
        # NaN placeholders mark a loss term as inactive in the returned tuple
        tls, sls = np.nan, np.nan
        if target_weight > 0.:
            tls = target_weight * self.target_loss
            loss += tls
        if side_weight > 0.:
            sls = side_weight * self.side_loss
            loss += sls
        if all_losses:
            return loss, tls, sls
        return loss
def save(self, fn):
"""
Save your pattern's weights in a tar file containing npz files.
You can then use `load` to recreate the pattern from this file.
Parameters
----------
fn : str
file name
"""
# tar to one file
tmp_files = []
with tarfile.open(fn + ".tar", mode='w') as out:
# use lasagne style parameter storage to avoid CUDA vs. non-CUDA
# theano issue
phi_pval = lasagne.layers.get_all_param_values(self.phi)
phi_fn = fn+"_phi.npz"
np.savez(phi_fn, *phi_pval)
out.add (phi_fn)
tmp_files.append(phi_fn)
psi_pval = lasagne.layers.get_all_param_values(self.psi)
psi_fn = fn+"_psi.npz"
np.savez(psi_fn, *psi_pval)
out.add (psi_fn)
tmp_files.append(psi_fn)
if self.beta is not None:
beta_pval = lasagne.layers.get_all_param_values(self.beta)
beta_fn = fn+"_beta.npz"
np.savez(beta_fn, *beta_pval)
out.add (beta_fn)
tmp_files.append(beta_fn)
for d in tmp_files:
try:
os.unlink(d)
except:
pass
def load(self, fn):
"""
Assuming you have initialized the pattern exactly as it
was pickled in the file 'fn', you can restore all function
parameters using this function.
Parameters
----------
fn : str
file name
"""
fn_split = os.path.splitext(fn)
assert (fn_split[-1] == ".tar")
fun_names = ['phi', 'psi', 'beta']
npz_files_loaded = {}
with tarfile.open(fn, mode='r') as t:
for m in t.getmembers():
for fun_name in fun_names:
if fun_name in m.name:
#print ("extracting" + str(m))
t.extract(m)
npz_files_loaded[fun_name] = m.name
for fun_name, fun_npz in npz_files_loaded.items():
with np.load(fun_npz) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(self.__dict__[fun_name], param_values)
for _,d in npz_files_loaded.items():
try:
os.unlink(d)
except:
pass
| |
# -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
__os_mod__ = __import__("os")
__select_mod__ = __import__("select")
__socket_mod__ = __import__("socket")
_socket = __import__("socket")
import io
import wrapt
from ..core import syscall, enter_syscall
__all__ = ['OsProxy', 'SelectProxy']
# proxy the OS module
class OsProxy(wrapt.ObjectProxy):
    """Transparent proxy around the ``os`` module: attribute access is
    delegated to the real module, but potentially blocking calls are
    wrapped with ``syscall`` so they cooperate with the scheduler."""
    # os functions that may block and therefore must go through syscall()
    _OS_SYSCALLS = ("chown", "fchown", "close", "dup", "dup2", "read",
            "pread","write", "pwrite", "sendfile", "readv", "writev", "stat",
            "lstat", "truncate", "sync", "lseek", "open", "posix_fallocate",
            "posix_fadvise", "chmod", "chflags", )
    def __init__(self):
        super(OsProxy, self).__init__(__os_mod__)
    def __getattr__(self, name):
        # wrap syscalls; anything else is delegated to the os module as-is
        if name in self._OS_SYSCALLS:
            return syscall(getattr(self.__wrapped__, name))
        return getattr(self.__wrapped__, name)
if hasattr(_socket, "SocketIO"):
SocketIO = _socket.SocketIO
else:
from _socketio import SocketIO
class socket(object):
    """A subclass of _socket.socket wrapping the makefile() method and
    patching blocking calls so that they are routed through the
    scheduler's ``syscall`` wrapper."""
    __slots__ = ('_io_refs', '_sock', '_closed', )
    # blocking socket methods that must go through syscall()
    _BL_SYSCALLS = ('accept', 'getpeername', 'getsockname',
            'getsockopt', 'ioctl', 'recv', 'recvfrom', 'recvmsg',
            'recvmsg_into', 'recvfrom_into', 'recv_into', 'send',
            'sendall', 'sendto', 'sendmsg', )
    def __init__(self, family=_socket.AF_INET, type=_socket.SOCK_STREAM,
            proto=0, fileno=None):
        if fileno is not None:
            if hasattr(_socket.socket, 'detach'):
                # python 3: sockets can be built directly on a descriptor
                self._sock = _socket.socket(family, type, proto, fileno)
            else:
                # python 2: no fileno argument; duplicate via fromfd
                self._sock = _socket.fromfd(fileno, family, type, proto)
        else:
            self._sock = _socket.socket(family, type, proto)
        self._io_refs = 0
        self._closed = False
    def __enter__(self):
        return self
    def __exit__(self, *args):
        if not self._closed:
            self.close()
    def __getattr__(self, name):
        # wrap blocking syscalls; everything else is delegated untouched
        if name in self._BL_SYSCALLS:
            return syscall(getattr(self._sock, name))
        return getattr(self._sock, name)
    def makefile(self, mode="r", buffering=None, encoding=None,
            errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename,
        except the only mode characters supported are 'r', 'w' and 'b'.
        The semantics are similar too. (XXX refactor to share code?)
        """
        for c in mode:
            if c not in {"r", "w", "b"}:
                # BUG FIX: the original message string was never
                # interpolated with the offending mode
                raise ValueError("invalid mode %r (only r, w, b allowed)"
                                 % mode)
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # each stream keeps the socket alive until it is closed
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    def _decref_socketios(self):
        # called by SocketIO.close(); the real close only happens once the
        # socket is closed AND no stream references remain
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self._sock.close()
    def close(self):
        """Mark the socket closed; the descriptor is released once no
        makefile() stream still references it."""
        self._closed = True
        if self._io_refs <= 0:
            self._sock.close()
    def detach(self):
        """Detach and return the underlying file descriptor without
        closing it; this wrapper is unusable afterwards."""
        self._closed = True
        if hasattr(self._sock, 'detach'):
            return self._sock.detach()
        # python 2.7 has no detach method, fake it.
        # BUG FIX: this module imports os only as __os_mod__; the original
        # bare ``os.dup`` raised NameError on this fallback path.
        new_fd = __os_mod__.dup(self._sock.fileno())
        self._sock.close()
        return new_fd
class SocketProxy(wrapt.ObjectProxy):
    """Proxy for the ``socket`` module that hands out instances of the
    cooperative ``socket`` wrapper class instead of raw sockets."""
    def __init__(self):
        super(SocketProxy, self).__init__(__socket_mod__)
    def socket(self, *args, **kwargs):
        return socket(*args, **kwargs)
    def fromfd(self, fd, family, type, proto=0):
        # wrap an existing descriptor in the cooperative socket class
        return socket(family, type, fileno=fd)
    # BUG FIX: the feature test must look at the real socket class -- the
    # wrapper class above never has a 'share' attribute, so the original
    # hasattr(socket, "share") was always False and fromshare() was never
    # exposed (Windows, Python 3.3+).
    if hasattr(_socket.socket, "share"):
        def fromshare(self, info):
            return socket(0, 0, 0, info)
    if hasattr(_socket, "socketpair"):
        def socketpair(self, family=None, type=__socket_mod__.SOCK_STREAM,
                proto=0):
            """Create a pair of connected cooperative sockets."""
            if family is None:
                try:
                    family = self.__wrapped__.AF_UNIX
                # BUG FIX: a missing module attribute raises AttributeError,
                # not NameError (the NameError variant dates from code that
                # referenced the bare AF_UNIX name).
                except AttributeError:
                    family = self.__wrapped__.AF_INET
            a, b = self.__wrapped__.socketpair(family, type, proto)
            if hasattr(a, 'detach'):
                a = socket(family, type, proto, a.detach())
                b = socket(family, type, proto, b.detach())
            else:
                a = socket(family, type, proto, a.fileno())
                b = socket(family, type, proto, b.fileno())
            return a, b
# proxy the socket proxy
class _Poll(object):
def register(self, *args):
return self.p.register(*args)
def modify(self, *args):
return self.p.modify(*args)
def unregister(self, *args):
return self.p.unregister(*args)
def poll(self, *args, **kwargs):
return enter_syscall(self.p.poll, *args)
# Wrap each platform-specific polling mechanism (when available on this
# platform) so that the blocking wait happens inside enter_syscall().
if hasattr(__select_mod__, "devpoll"):
    # Solaris /dev/poll
    class devpoll(_Poll):
        def __init__(self):
            self.p = __select_mod__.devpoll()
if hasattr(__select_mod__, "epoll"):
    # Linux epoll; also exposes the fd management API of the real object
    class epoll(_Poll):
        def __init__(self):
            self.p = __select_mod__.epoll()
        def close(self):
            return self.p.close()
        def fileno(self):
            return self.p.fileno()
        def fromfd(self, fd):
            return self.p.fromfd(fd)
if hasattr(__select_mod__, "poll"):
    # classic poll()
    class poll(_Poll):
        def __init__(self):
            self.p = __select_mod__.poll()
if hasattr(__select_mod__, "kqueue"):
    # BSD/macOS kqueue; control() is the blocking entry point, so it is
    # the only call routed through enter_syscall
    class kqueue(object):
        def __init__(self):
            self.kq = __select_mod__.kqueue()
        def fileno(self):
            return self.kq.fileno()
        def fromfd(self, fd):
            return self.kq.fromfd(fd)
        def close(self):
            return self.kq.close()
        def control(self, *args, **kwargs):
            return enter_syscall(self.kq.control, *args, **kwargs)
class SelectProxy(wrapt.ObjectProxy):
    """Proxy for the ``select`` module: polling constructors and
    ``select()`` itself return/operate through scheduler-aware wrappers."""
    def __init__(self):
        super(SelectProxy, self).__init__(__select_mod__)
    # Only expose the polling constructors that exist on this platform.
    # Inside each method the bare name resolves to the module-level wrapper
    # class of the same name defined above, not to the method itself.
    if hasattr(__select_mod__, "devpoll"):
        def devpoll(self):
            return devpoll()
    if hasattr(__select_mod__, "epoll"):
        def epoll(self):
            return epoll()
    if hasattr(__select_mod__, "poll"):
        def poll(self):
            return poll()
    if hasattr(__select_mod__, "kqueue"):
        def kqueue(self):
            return kqueue()
    def select(self, *args, **kwargs):
        # blocking select() runs inside the scheduler's syscall context
        return enter_syscall(self.__wrapped__.select, *args, **kwargs)
| |
import io
import os
import struct
from fitparse.processors import FitFileDataProcessor
from fitparse.profile import FIELD_TYPE_TIMESTAMP, MESSAGE_TYPES
from fitparse.records import (
DataMessage, FieldData, FieldDefinition, DevFieldDefinition, DefinitionMessage, MessageHeader,
BASE_TYPES, BASE_TYPE_BYTE, DevField,
add_dev_data_id, add_dev_field_description, get_dev_type
)
from fitparse.utils import calc_crc, FitParseError, FitEOFError, FitCRCError, FitHeaderError
class FitFile(object):
    """Parser for ANT/Garmin .FIT files.

    ``fileish`` may be an open binary file object, a ``bytes`` blob that
    starts with a FIT header, or a filesystem path.

    Parameters
    ----------
    fileish : file-like, bytes or str
        The FIT data source.
    check_crc : bool
        If True (default), raise :class:`FitCRCError` on CRC mismatches.
    data_processor : FitFileDataProcessor, optional
        Processor run over every parsed field and message.
    """

    def __init__(self, fileish, check_crc=True, data_processor=None):
        # BUG FIX: set _file before anything can raise, so that
        # __del__ -> close() does not blow up with AttributeError when
        # open() below fails.
        self._file = None
        if hasattr(fileish, 'read'):
            self._file = fileish
        elif isinstance(fileish, bytes) and fileish[8:12] == b'.FIT':
            self._file = io.BytesIO(fileish)
        else:
            self._file = open(fileish, 'rb')
        self.check_crc = check_crc
        self._processor = data_processor or FitFileDataProcessor()
        # Get total filesize
        self._file.seek(0, os.SEEK_END)
        self._filesize = self._file.tell()
        self._file.seek(0, os.SEEK_SET)
        # Start off by parsing the file header (sets initial attribute values)
        self._parse_file_header()

    def __del__(self):
        self.close()

    def close(self):
        """Close the underlying file object (idempotent)."""
        if self._file and hasattr(self._file, "close"):
            self._file.close()
            self._file = None

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    ##########
    # Private low-level utility methods for reading of fit file

    def _read(self, size):
        """Read *size* bytes, updating the running CRC and byte budget."""
        if size <= 0:
            return None
        data = self._file.read(size)
        self._crc = calc_crc(data, self._crc)
        self._bytes_left -= len(data)
        return data

    def _read_struct(self, fmt, endian='<', data=None, always_tuple=False):
        """Unpack *fmt* (struct syntax without endian prefix) from the file,
        or from *data* if given. A single-element result is unwrapped unless
        *always_tuple* is set."""
        fmt_with_endian = "%s%s" % (endian, fmt)
        size = struct.calcsize(fmt_with_endian)
        if size <= 0:
            raise FitParseError("Invalid struct format: %s" % fmt_with_endian)
        if data is None:
            data = self._read(size)
        if size != len(data):
            raise FitEOFError("Tried to read %d bytes from .FIT file but got %d" % (size, len(data)))
        unpacked = struct.unpack(fmt_with_endian, data)
        # Flatten tuple if it's got only one value
        return unpacked if (len(unpacked) > 1) or always_tuple else unpacked[0]

    def _read_and_assert_crc(self, allow_zero=False):
        # CRC Calculation is little endian from SDK.
        # Compares the CRC accumulated so far against the stored value; a
        # stored value of zero may be accepted (the header CRC is optional).
        crc_expected, crc_actual = self._crc, self._read_struct('H')
        if (crc_actual != crc_expected) and not (allow_zero and (crc_actual == 0)):
            if self.check_crc:
                raise FitCRCError('CRC Mismatch [expected = 0x%04X, actual = 0x%04X]' % (
                    crc_expected, crc_actual))

    ##########
    # Private Data Parsing Methods

    def _parse_file_header(self):
        """Parse the 12(+)-byte FIT header and reset all parser state."""
        # Initialize data
        self._accumulators = {}
        self._bytes_left = -1
        self._complete = False
        self._compressed_ts_accumulator = 0
        self._crc = 0
        self._local_mesgs = {}
        self._messages = []
        header_data = self._read(12)
        if header_data[8:12] != b'.FIT':
            raise FitHeaderError("Invalid .FIT File Header")
        # Larger fields are explicitly little endian from SDK
        header_size, protocol_ver_enc, profile_ver_enc, data_size = self._read_struct('2BHI4x', data=header_data)
        # Decode the same way the SDK does
        self.protocol_version = float("%d.%d" % (protocol_ver_enc >> 4, protocol_ver_enc & ((1 << 4) - 1)))
        self.profile_version = float("%d.%d" % (profile_ver_enc / 100, profile_ver_enc % 100))
        # Consume extra header information
        extra_header_size = header_size - 12
        if extra_header_size > 0:
            # Make sure extra field in header is at least 2 bytes to calculate CRC
            if extra_header_size < 2:
                raise FitHeaderError('Irregular File Header Size')
            # Consume extra two bytes of header and check CRC
            self._read_and_assert_crc(allow_zero=True)
            # Consume any extra bytes, since header size "may be increased in
            # "future to add additional optional information" (from SDK)
            self._read(extra_header_size - 2)
        # After we've consumed the header, set the bytes left to be read
        self._bytes_left = data_size

    def _parse_message(self):
        """Parse and return the next message, or None at end of file."""
        # When done, calculate the CRC and return None
        if self._bytes_left <= 0:
            if not self._complete:
                self._read_and_assert_crc()
            if self._file.tell() >= self._filesize:
                self._complete = True
                self.close()
                return None
            # Still have data left in the file - assuming chained fit files
            self._parse_file_header()
            return self._parse_message()
        header = self._parse_message_header()
        if header.is_definition:
            message = self._parse_definition_message(header)
        else:
            message = self._parse_data_message(header)
            if message.mesg_type is not None:
                # developer data definitions register new dynamic field types
                if message.mesg_type.name == 'developer_data_id':
                    add_dev_data_id(message)
                elif message.mesg_type.name == 'field_description':
                    add_dev_field_description(message)
        self._messages.append(message)
        return message

    def _parse_message_header(self):
        header = self._read_struct('B')
        if header & 0x80:  # bit 7: Is this record a compressed timestamp?
            return MessageHeader(
                is_definition=False,
                is_developer_data=False,
                local_mesg_num=(header >> 5) & 0x3,  # bits 5-6
                time_offset=header & 0x1F,  # bits 0-4
            )
        else:
            return MessageHeader(
                is_definition=bool(header & 0x40),  # bit 6
                is_developer_data=bool(header & 0x20),  # bit 5
                local_mesg_num=header & 0xF,  # bits 0-3
                time_offset=None,
            )

    def _parse_definition_message(self, header):
        """Parse a definition record and register it as a local message."""
        # Read reserved byte and architecture byte to resolve endian
        endian = '>' if self._read_struct('xB') else '<'
        # Read rest of header with endian awareness
        global_mesg_num, num_fields = self._read_struct('HB', endian=endian)
        mesg_type = MESSAGE_TYPES.get(global_mesg_num)
        field_defs = []
        for n in range(num_fields):
            field_def_num, field_size, base_type_num = self._read_struct('3B', endian=endian)
            # Try to get field from message type (None if unknown)
            field = mesg_type.fields.get(field_def_num) if mesg_type else None
            base_type = BASE_TYPES.get(base_type_num, BASE_TYPE_BYTE)
            if (field_size % base_type.size) != 0:
                # NOTE: we could fall back to byte encoding if there's any
                # examples in the wild. For now, just throw an exception
                raise FitParseError("Invalid field size %d for type '%s' (expected a multiple of %d)" % (
                    field_size, base_type.name, base_type.size))
            # If the field has components that are accumulators
            # start recording their accumulation at 0
            if field and field.components:
                for component in field.components:
                    if component.accumulate:
                        accumulators = self._accumulators.setdefault(global_mesg_num, {})
                        accumulators[component.def_num] = 0
            field_defs.append(FieldDefinition(
                field=field,
                def_num=field_def_num,
                base_type=base_type,
                size=field_size,
            ))
        dev_field_defs = []
        if header.is_developer_data:
            num_dev_fields = self._read_struct('B', endian=endian)
            for n in range(num_dev_fields):
                field_def_num, field_size, dev_data_index = self._read_struct('3B', endian=endian)
                field = get_dev_type(dev_data_index, field_def_num)
                dev_field_defs.append(DevFieldDefinition(
                    field=field,
                    dev_data_index=dev_data_index,
                    def_num=field_def_num,
                    size=field_size
                ))
        def_mesg = DefinitionMessage(
            header=header,
            endian=endian,
            mesg_type=mesg_type,
            mesg_num=global_mesg_num,
            field_defs=field_defs,
            dev_field_defs=dev_field_defs,
        )
        self._local_mesgs[header.local_mesg_num] = def_mesg
        return def_mesg

    def _parse_raw_values_from_data_message(self, def_mesg):
        """Read one raw value per field definition of *def_mesg*."""
        # Go through mesg's field defs and read them
        raw_values = []
        for field_def in def_mesg.field_defs + def_mesg.dev_field_defs:
            base_type = field_def.base_type
            is_byte = base_type.name == 'byte'
            # Struct to read n base types (field def size / base type size).
            # BUG FIX: use integer division; the definition message already
            # guarantees size is a multiple of base_type.size, but '/' yields
            # a float under Python 3.
            struct_fmt = '%d%s' % (
                field_def.size // base_type.size,
                base_type.fmt,
            )
            # Extract the raw value, ask for a tuple if it's a byte type
            raw_value = self._read_struct(
                struct_fmt, endian=def_mesg.endian, always_tuple=is_byte,
            )
            # If the field returns with a tuple of values it's definitely an
            # oddball, but we'll parse it on a per-value basis it.
            # If it's a byte type, treat the tuple as a single value
            if isinstance(raw_value, tuple) and not is_byte:
                raw_value = tuple(base_type.parse(rv) for rv in raw_value)
            else:
                # Otherwise, just scrub the singular value
                raw_value = base_type.parse(raw_value)
            raw_values.append(raw_value)
        return raw_values

    @staticmethod
    def _resolve_subfield(field, def_mesg, raw_values):
        """Resolve into (field, parent): (subfield, field) or (field, None)."""
        if field.subfields:
            for sub_field in field.subfields:
                # Go through reference fields for this sub field
                for ref_field in sub_field.ref_fields:
                    # Go through field defs AND their raw values
                    for field_def, raw_value in zip(def_mesg.field_defs, raw_values):
                        # If there's a definition number AND raw value match on the
                        # reference field, then we return this subfield
                        if (field_def.def_num == ref_field.def_num) and (ref_field.raw_value == raw_value):
                            return sub_field, field
        return field, None

    def _apply_scale_offset(self, field, raw_value):
        """Apply the field's numeric scale and offset to *raw_value*
        (recursively for tuples)."""
        if isinstance(raw_value, tuple):
            # Contains multiple values, apply transformations to all of them
            return tuple(self._apply_scale_offset(field, x) for x in raw_value)
        elif isinstance(raw_value, (int, float)):
            if field.scale:
                raw_value = float(raw_value) / field.scale
            if field.offset:
                raw_value = raw_value - field.offset
        return raw_value

    @staticmethod
    def _apply_compressed_accumulation(raw_value, accumulation, num_bits):
        """Expand a *num_bits*-wide rollover value against the accumulated
        full-width value (handles wraparound of the truncated counter)."""
        max_value = (1 << num_bits)
        max_mask = max_value - 1
        base_value = raw_value + (accumulation & ~max_mask)
        if raw_value < (accumulation & max_mask):
            base_value += max_value
        return base_value

    def _parse_data_message(self, header):
        """Parse a data record using the matching local definition message."""
        def_mesg = self._local_mesgs.get(header.local_mesg_num)
        if not def_mesg:
            raise FitParseError('Got data message with invalid local message type %d' % (
                header.local_mesg_num))
        raw_values = self._parse_raw_values_from_data_message(def_mesg)
        field_datas = []  # TODO: I don't love this name, update on DataMessage too
        # TODO: Maybe refactor this and make it simpler (or at least broken
        # up into sub-functions)
        for field_def, raw_value in zip(def_mesg.field_defs + def_mesg.dev_field_defs, raw_values):
            field, parent_field = field_def.field, None
            if field:
                field, parent_field = self._resolve_subfield(field, def_mesg, raw_values)
                # Resolve component fields
                if field.components:
                    for component in field.components:
                        # Render its raw value
                        cmp_raw_value = component.render(raw_value)
                        # Apply accumulated value
                        if component.accumulate:
                            accumulator = self._accumulators[def_mesg.mesg_num]
                            cmp_raw_value = self._apply_compressed_accumulation(
                                cmp_raw_value, accumulator[component.def_num], component.bits,
                            )
                            accumulator[component.def_num] = cmp_raw_value
                        # Apply scale and offset from component, not from the dynamic field
                        # as they may differ
                        cmp_raw_value = self._apply_scale_offset(component, cmp_raw_value)
                        # Extract the component's dynamic field from def_mesg
                        cmp_field = def_mesg.mesg_type.fields[component.def_num]
                        # Resolve a possible subfield
                        cmp_field, cmp_parent_field = self._resolve_subfield(cmp_field, def_mesg, raw_values)
                        cmp_value = cmp_field.render(cmp_raw_value)
                        # Plop it on field_datas
                        field_datas.append(
                            FieldData(
                                field_def=None,
                                field=cmp_field,
                                parent_field=cmp_parent_field,
                                value=cmp_value,
                                raw_value=cmp_raw_value,
                            )
                        )
                # TODO: Do we care about a base_type and a resolved field mismatch?
                # My hunch is we don't
                value = self._apply_scale_offset(field, field.render(raw_value))
            else:
                value = raw_value
            # Update compressed timestamp field
            if (field_def.def_num == FIELD_TYPE_TIMESTAMP.def_num) and (raw_value is not None):
                self._compressed_ts_accumulator = raw_value
            field_datas.append(
                FieldData(
                    field_def=field_def,
                    field=field,
                    parent_field=parent_field,
                    value=value,
                    raw_value=raw_value,
                )
            )
        # Apply timestamp field if we got a header
        if header.time_offset is not None:
            ts_value = self._compressed_ts_accumulator = self._apply_compressed_accumulation(
                header.time_offset, self._compressed_ts_accumulator, 5,
            )
            field_datas.append(
                FieldData(
                    field_def=None,
                    field=FIELD_TYPE_TIMESTAMP,
                    parent_field=None,
                    value=FIELD_TYPE_TIMESTAMP.render(ts_value),
                    raw_value=ts_value,
                )
            )
        # Apply data processors
        for field_data in field_datas:
            # Apply type name processor
            self._processor.run_type_processor(field_data)
            self._processor.run_field_processor(field_data)
            self._processor.run_unit_processor(field_data)
        data_message = DataMessage(header=header, def_mesg=def_mesg, fields=field_datas)
        self._processor.run_message_processor(data_message)
        return data_message

    ##########
    # Public API

    def get_messages(self, name=None, with_definitions=False, as_dict=False):
        """Yield parsed (and, lazily, still-unparsed) messages, optionally
        filtered by message name(s) or number(s)."""
        if with_definitions:  # with_definitions implies as_dict=False
            as_dict = False
        if name is not None:
            if isinstance(name, (tuple, list)):
                names = name
            else:
                names = [name]
            # Convert any string numbers in names to ints
            names = set([
                int(n) if (isinstance(n, str) and n.isdigit()) else n
                for n in names
            ])

        def should_yield(message):
            if with_definitions or message.type == 'data':
                # name arg is None we return all
                if name is None:
                    return True
                else:
                    if (message.name in names) or (message.mesg_num in names):
                        return True
            return False
        # Yield all parsed messages first
        for message in self._messages:
            if should_yield(message):
                yield message.as_dict() if as_dict else message
        # If there are unparsed messages, yield those too
        while not self._complete:
            message = self._parse_message()
            if message and should_yield(message):
                yield message.as_dict() if as_dict else message

    @property
    def messages(self):
        # TODO: could this be more efficient?
        return list(self.get_messages())

    def parse(self):
        """Eagerly parse the entire file."""
        while self._parse_message():
            pass

    def __iter__(self):
        return self.get_messages()

    # TODO: Create subclasses like Activity and do per-value monkey patching
    # for example local_timestamp to adjust timestamp on a per-file basis
| |
"""Unit tests of commenting queries."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.json_.commenting.queries import BookQuery
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
# Proxy plumbing shared by all fixtures in this module: wrap a simple HTTP
# request in a proxy condition and obtain the proxy used for service calls.
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
# Catch-all Type constant for tests that require some typed argument.
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_query_class_fixture(request):
    # From test_templates/resource.py::ResourceQuery::init_template
    # Build one COMMENTING service manager per parametrized configuration;
    # for authz-enabled configs also create a throwaway Book catalog that
    # the tests in the class query against.
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        create_form = request.cls.svc_mgr.get_book_form_for_create([])
        create_form.display_name = 'Test catalog'
        create_form.description = 'Test catalog description'
        request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
    def class_tear_down():
        # Delete the catalog created above once the test class finishes.
        if not is_never_authz(request.cls.service_config):
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_query_test_fixture(request):
    # From test_templates/resource.py::ResourceQuery::init_template
    # Fresh CommentQuery per test so query terms never leak between tests.
    request.cls.query = request.cls.catalog.get_comment_query()
@pytest.mark.usefixtures("comment_query_class_fixture", "comment_query_test_fixture")
class TestCommentQuery(object):
    """Tests for CommentQuery.

    Runs once per service configuration supplied by the class fixture.
    Assertions that inspect the private ``_query_terms`` dict are guarded by
    ``is_no_authz`` because never-authz configurations construct no query
    object to inspect, and the ``supports_*``/``get_*_query`` accessors are
    expected to raise ``errors.Unimplemented``.
    """
    def test_match_reference_id(self):
        """Tests match_reference_id"""
        # From test_templates/resource.py::ResourceQuery::match_avatar_id_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        if is_no_authz(self.service_config):
            assert 'referenceId' not in self.query._query_terms
        self.query.match_reference_id(test_id, match=True)
        if is_no_authz(self.service_config):
            # The matcher stores stringified ids under a Mongo-style $in list.
            assert self.query._query_terms['referenceId'] == {
                '$in': [str(test_id)]
            }
    def test_clear_reference_id_terms(self):
        """Tests clear_reference_id_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_avatar_id_terms_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_reference_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert 'referenceId' in self.query._query_terms
        self.query.clear_reference_id_terms()
        if is_no_authz(self.service_config):
            assert 'referenceId' not in self.query._query_terms
    def test_match_commentor_id(self):
        """Tests match_commentor_id"""
        # From test_templates/resource.py::ResourceQuery::match_avatar_id_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        if is_no_authz(self.service_config):
            assert 'commentorId' not in self.query._query_terms
        self.query.match_commentor_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert self.query._query_terms['commentorId'] == {
                '$in': [str(test_id)]
            }
    def test_clear_commentor_id_terms(self):
        """Tests clear_commentor_id_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_avatar_id_terms_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_commentor_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert 'commentorId' in self.query._query_terms
        self.query.clear_commentor_id_terms()
        if is_no_authz(self.service_config):
            assert 'commentorId' not in self.query._query_terms
    def test_supports_commentor_query(self):
        """Tests supports_commentor_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_commentor_query()
    def test_get_commentor_query(self):
        """Tests get_commentor_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_commentor_query()
    def test_clear_commentor_terms(self):
        """Tests clear_commentor_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_group_terms_template
        # Seed the private term dict directly, then verify clearing removes it.
        if is_no_authz(self.service_config):
            self.query._query_terms['commentor'] = 'foo'
        self.query.clear_commentor_terms()
        if is_no_authz(self.service_config):
            assert 'commentor' not in self.query._query_terms
    def test_match_commenting_agent_id(self):
        """Tests match_commenting_agent_id"""
        # From test_templates/resource.py::ResourceQuery::match_avatar_id_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        if is_no_authz(self.service_config):
            assert 'commentingAgentId' not in self.query._query_terms
        self.query.match_commenting_agent_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert self.query._query_terms['commentingAgentId'] == {
                '$in': [str(test_id)]
            }
    def test_clear_commenting_agent_id_terms(self):
        """Tests clear_commenting_agent_id_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_avatar_id_terms_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_commenting_agent_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert 'commentingAgentId' in self.query._query_terms
        self.query.clear_commenting_agent_id_terms()
        if is_no_authz(self.service_config):
            assert 'commentingAgentId' not in self.query._query_terms
    def test_supports_commenting_agent_query(self):
        """Tests supports_commenting_agent_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_commenting_agent_query()
    def test_get_commenting_agent_query(self):
        """Tests get_commenting_agent_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_commenting_agent_query()
    def test_clear_commenting_agent_terms(self):
        """Tests clear_commenting_agent_terms"""
        # NOTE: unlike clear_commentor_terms above, this clearer is expected
        # to raise Unimplemented rather than mutate _query_terms.
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.clear_commenting_agent_terms()
    def test_match_text(self):
        """Tests match_text"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_text(True, True, True)
    def test_match_any_text(self):
        """Tests match_any_text"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_any_text(True)
    def test_clear_text_terms(self):
        """Tests clear_text_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['text'] = 'foo'
        self.query.clear_text_terms()
        if is_no_authz(self.service_config):
            assert 'text' not in self.query._query_terms
    def test_match_rating_id(self):
        """Tests match_rating_id"""
        # From test_templates/resource.py::ResourceQuery::match_avatar_id_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        if is_no_authz(self.service_config):
            assert 'ratingId' not in self.query._query_terms
        self.query.match_rating_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert self.query._query_terms['ratingId'] == {
                '$in': [str(test_id)]
            }
    def test_clear_rating_id_terms(self):
        """Tests clear_rating_id_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_avatar_id_terms_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_rating_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert 'ratingId' in self.query._query_terms
        self.query.clear_rating_id_terms()
        if is_no_authz(self.service_config):
            assert 'ratingId' not in self.query._query_terms
    def test_supports_rating_query(self):
        """Tests supports_rating_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_rating_query()
    def test_get_rating_query(self):
        """Tests get_rating_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_rating_query()
    def test_match_any_rating(self):
        """Tests match_any_rating"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_any_rating(True)
    def test_clear_rating_terms(self):
        """Tests clear_rating_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['rating'] = 'foo'
        self.query.clear_rating_terms()
        if is_no_authz(self.service_config):
            assert 'rating' not in self.query._query_terms
    def test_match_book_id(self):
        """Tests match_book_id"""
        # From test_templates/resource.py::ResourceQuery::match_bin_id_template
        # Catalog assignment is tracked under 'assignedBookIds', not 'bookId'.
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_book_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert self.query._query_terms['assignedBookIds'] == {
                '$in': [str(test_id)]
            }
    def test_clear_book_id_terms(self):
        """Tests clear_book_id_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_bin_id_terms_template
        test_id = Id('osid.Osid%3Afake%40ODL.MIT.EDU')
        self.query.match_book_id(test_id, match=True)
        if is_no_authz(self.service_config):
            assert 'assignedBookIds' in self.query._query_terms
        self.query.clear_book_id_terms()
        if is_no_authz(self.service_config):
            assert 'assignedBookIds' not in self.query._query_terms
    def test_supports_book_query(self):
        """Tests supports_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_book_query()
    def test_get_book_query(self):
        """Tests get_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_book_query()
    def test_clear_book_terms(self):
        """Tests clear_book_terms"""
        # From test_templates/resource.py::ResourceQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['book'] = 'foo'
        self.query.clear_book_terms()
        if is_no_authz(self.service_config):
            assert 'book' not in self.query._query_terms
    def test_get_comment_query_record(self):
        """Tests get_comment_query_record"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_comment_query_record(True)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_query_class_fixture(request):
    """Class-scoped fixture: one COMMENTING service manager per configuration.

    For every configuration except never-authz it also creates a throwaway
    test Book catalog and registers a finalizer that deletes it again.
    """
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        form = request.cls.svc_mgr.get_book_form_for_create([])
        form.display_name = 'Test catalog'
        form.description = 'Test catalog description'
        request.cls.catalog = request.cls.svc_mgr.create_book(form)
        request.cls.fake_id = Id('resource.Resource%3A1%40ODL.MIT.EDU')

    def _teardown():
        # Never-authz runs created no catalog, so there is nothing to delete.
        if not is_never_authz(request.cls.service_config):
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
    request.addfinalizer(_teardown)
@pytest.fixture(scope="function")
def book_query_test_fixture(request):
    """Per-test fixture: build a fresh BookQuery for each test method."""
    # The query session is not implemented, so construct the BookQuery
    # directly against the catalog's runtime.
    if is_never_authz(request.cls.service_config):
        return
    request.cls.query = BookQuery(runtime=request.cls.catalog._runtime)
@pytest.mark.usefixtures("book_query_class_fixture", "book_query_test_fixture")
class TestBookQuery(object):
    """Tests for BookQuery.

    Catalog-level query: the match/support/get accessors are expected to
    raise ``errors.Unimplemented``; the ``clear_*_terms`` tests seed the
    private ``_query_terms`` dict directly and verify the key is removed.
    Never-authz configurations construct no query object, so those runs skip
    the method calls entirely.
    """
    def test_match_comment_id(self):
        """Tests match_comment_id"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_comment_id(True, True)
    def test_clear_comment_id_terms(self):
        """Tests clear_comment_id_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['commentId'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_comment_id_terms()
        if is_no_authz(self.service_config):
            assert 'commentId' not in self.query._query_terms
    def test_supports_comment_query(self):
        """Tests supports_comment_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_comment_query()
    def test_get_comment_query(self):
        """Tests get_comment_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_comment_query()
    def test_match_any_comment(self):
        """Tests match_any_comment"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_any_comment(True)
    def test_clear_comment_terms(self):
        """Tests clear_comment_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['comment'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_comment_terms()
        if is_no_authz(self.service_config):
            assert 'comment' not in self.query._query_terms
    def test_match_ancestor_book_id(self):
        """Tests match_ancestor_book_id"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_ancestor_book_id(True, True)
    def test_clear_ancestor_book_id_terms(self):
        """Tests clear_ancestor_book_id_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['ancestorBookId'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_ancestor_book_id_terms()
        if is_no_authz(self.service_config):
            assert 'ancestorBookId' not in self.query._query_terms
    def test_supports_ancestor_book_query(self):
        """Tests supports_ancestor_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_ancestor_book_query()
    def test_get_ancestor_book_query(self):
        """Tests get_ancestor_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_ancestor_book_query()
    def test_match_any_ancestor_book(self):
        """Tests match_any_ancestor_book"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_any_ancestor_book(True)
    def test_clear_ancestor_book_terms(self):
        """Tests clear_ancestor_book_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['ancestorBook'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_ancestor_book_terms()
        if is_no_authz(self.service_config):
            assert 'ancestorBook' not in self.query._query_terms
    def test_match_descendant_book_id(self):
        """Tests match_descendant_book_id"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_descendant_book_id(True, True)
    def test_clear_descendant_book_id_terms(self):
        """Tests clear_descendant_book_id_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['descendantBookId'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_descendant_book_id_terms()
        if is_no_authz(self.service_config):
            assert 'descendantBookId' not in self.query._query_terms
    def test_supports_descendant_book_query(self):
        """Tests supports_descendant_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.supports_descendant_book_query()
    def test_get_descendant_book_query(self):
        """Tests get_descendant_book_query"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_descendant_book_query()
    def test_match_any_descendant_book(self):
        """Tests match_any_descendant_book"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.match_any_descendant_book(True)
    def test_clear_descendant_book_terms(self):
        """Tests clear_descendant_book_terms"""
        # From test_templates/resource.py::BinQuery::clear_group_terms_template
        if is_no_authz(self.service_config):
            self.query._query_terms['descendantBook'] = 'foo'
        if not is_never_authz(self.service_config):
            self.query.clear_descendant_book_terms()
        if is_no_authz(self.service_config):
            assert 'descendantBook' not in self.query._query_terms
    def test_get_book_query_record(self):
        """Tests get_book_query_record"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        elif uses_cataloging(self.service_config):
            pass  # cannot call the _get_record() methods on catalogs
        else:
            with pytest.raises(errors.Unimplemented):
                self.query.get_book_query_record(True)
| |
#!/usr/bin/python
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import functools
import random
import sys
from operator import attrgetter

import numpy as np

import bbobbenchmarks as bn
import fgeneric
from deap import base
from deap import creator
from deap import tools
toolbox = base.Toolbox()
# Single-objective minimization (weight -1.0); individuals are typed arrays
# of doubles carrying that fitness.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode="d",
               fitness=creator.FitnessMin)
# pool = multiprocessing.Pool()
# toolbox.register("map", futures.map)
def tupleize(func):
    """A decorator that tuple-izes the result of a function.

    Useful because DEAP expects fitness values to be tuples, while the COCO
    benchmark functions return a single float.
    """
    @functools.wraps(func)  # keep the wrapped function's name/docstring
    def wrapper(*args, **kargs):
        return func(*args, **kargs),
    return wrapper
def main(func,
         NGEN,
         CXPB,
         MUTPB,
         dim,
         ftarget,
         tournsize,
         n_aval,
         ):
    """Run a generational GA with elitism and stagnation restarts on *func*.

    :param func: objective to minimize (wrapped by ``tupleize`` so it may
        return a bare float)
    :param NGEN: number of generations
    :param CXPB: crossover probability per offspring pair
    :param MUTPB: mutation probability per offspring
    :param dim: individual length (problem dimensionality)
    :param ftarget: target fitness from the COCO logger (currently unused;
        kept for interface compatibility)
    :param tournsize: tournament size for selection
    :param n_aval: total evaluation budget, used to derive the population size
    :return: a ``deap.tools.Logbook`` with per-generation statistics

    NOTE(review): reads the module-level global ``f_name`` for the
    warm-start/save file names, so it must be set before calling — confirm
    against the ``__main__`` block.
    """
    toolbox.register("select", tools.selTournament, tournsize=tournsize)
    toolbox.register(
        "mutate",
        tools.mutGaussian,
        mu=0,
        sigma=1,
        indpb=0.1
    )
    # mutShuffleIndexes
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # calculating the number of individuals of the
    # populations based on the number of executions
    y = int(n_aval / NGEN)
    x = n_aval - y * NGEN
    n = x + y
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)
    # Genes are uniform floats in [-4, 4].  (A redundant earlier registration
    # of "attr_float" as random.random was removed; it was immediately
    # overwritten by this one.)
    toolbox.register("attr_float", random.uniform, -4, 4)
    toolbox.register("mate", tools.cxSimulatedBinaryBounded, eta=0, low=-4, up=4)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, dim)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    logbook = tools.Logbook()
    logbook.header = "gen", "min", "avg", "max", "std"
    pop = toolbox.population(n)
    # Warm start: overwrite the first individual with a previously saved one,
    # if such a file exists.
    filename = ("../pseudo-adaptative/init_pop_f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_2.txt")
    if np.DataSource().exists(filename):
        with open(filename, 'r') as f:
            # NOTE(review): eval() of a locally produced results file; fine
            # for trusted experiment output, unsafe for untrusted input.
            a = eval(f.readline())
        for index in range(len(pop[0])):
            pop[0][index] = a[index]
    # Evaluate the entire initial population
    fitnesses = list(toolbox.map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # BUGFIX: clone the *selected* individuals.  The original cloned
        # ``pop`` here, which silently discarded the tournament selection.
        offspring = list(toolbox.map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Re-evaluate only the individuals whose fitness was invalidated above
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Elitism: keep the previous generation's best by overwriting the
        # worst offspring, then shuffle so its position carries no bias.
        best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"))
        offspring[0] = best_pop
        random.shuffle(offspring)
        pop[:] = offspring
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
        # Restart on stagnation: when the fitness spread collapses, draw a
        # fresh population but keep the incumbent best individual.
        if record["std"] < 10e-12:
            best_pop = tools.selBest(pop, 1)[0]
            pop = toolbox.population(n)
            pop = sorted(pop, key=attrgetter("fitness"))
            pop[0] = best_pop
            fitnesses = list(toolbox.map(toolbox.evaluate, pop))
            for ind, fit in zip(pop, fitnesses):
                ind.fitness.values = fit
            g += 1  # log the restarted population as an extra generation row
            record = stats.compile(pop)
            logbook.record(gen=g, **record)
    # Persist the final best individual as a warm start for later runs.
    # NOTE(review): best_pop is only bound inside the loop, so NGEN must be
    # >= 1 — confirm callers never pass NGEN == 0.
    filename = ("../SBX/init_pop_f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_2.txt")
    if not np.DataSource().exists(filename):
        with open(filename, "w") as myfile:
            for element in best_pop:
                myfile.write(str(element))
                myfile.write(', ')
            myfile.write('\n')
    return logbook
if __name__ == "__main__":
    # Parse "-params <file>" and "-tournsize <int>" from the command line.
    # NOTE(review): if -params is absent, ``gaParams`` stays unbound and the
    # open() below raises NameError — confirm callers always pass it.
    for i in range(len(sys.argv) - 1):
        if (sys.argv[i] == '-params'):
            gaParams = sys.argv[i + 1]
        elif (sys.argv[i] == '-tournsize'):
            tournsize = int(sys.argv[i + 1])
    f = open(gaParams, "r")
    # Whitespace-separated columns of the params file, in this fixed order;
    # lines starting with '#' are comments.  Each non-comment line overwrites
    # the previous one, so effectively the last line wins.
    keys = ['key', 'NGEN', 'n_aval', 'CXPB', 'MUTPB', 'dim', 'seed', 'tournsize']
    params = dict()
    for line in f:
        if line[0] == '#':
            continue
        tokens = line.split()
        for key, value in zip(keys, tokens):
            if key == 'key':
                params[key] = value
            elif key == 'CXPB' or key == 'MUTPB':
                params[key] = float(value)
            else:
                params[key] = int(value)
    f.close()
    # Maximum number of restart for an algorithm that detects stagnation
    # Create a COCO experiment that will log the results under the
    # ./output directory
    e = fgeneric.LoggingFunction('output')
    # Iterate over all desired test dimensions
    # for dim in (2, 3, 5, 10, 20, 40):
    dim = params['dim']
    # Set the maximum number function evaluation granted to the algorithm
    # This is usually function of the dimensionality of the problem
    # Iterate over a set of benchmarks (noise free benchmarks here)
    # for f_name in bn.nfreeIDs:
    f_name = 5
    # Iterate over all the instance of a single problem
    # Rotation, translation, etc.
    # for instance in chain(range(1, 6), range(21, 31)):
    instance = 1
    # Set the function to be used (problem) in the logger
    e.setfun(*bn.instantiate(f_name, iinstance=1))
    # Independent restarts until maxfunevals or ftarget is reached
    # Run the algorithm with the remaining
    # number of evaluations
    # random.seed(params['seed'])
    logbook = main(e.evalfun,
                   NGEN=params['NGEN'],
                   CXPB=params['CXPB'],
                   MUTPB=params['MUTPB'],
                   dim=dim,
                   n_aval=params['n_aval'],
                   tournsize=tournsize,
                   ftarget=e.ftarget)
    # Append the per-generation statistics to a per-(function, dim, tournsize)
    # results file.
    filename = ("../SBX/f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_" +
                str(tournsize) +
                ".txt")
    with open(filename, "a") as myfile:
        myfile.write(str(logbook))
        myfile.write(str('\n'))
        myfile.close()
| |
import io
from zipfile import BadZipfile
from tempfile import NamedTemporaryFile
import openpyxl
from openpyxl.utils.exceptions import InvalidFileException
from django.core.files.uploadedfile import UploadedFile
from django.utils.translation import ugettext as _
class InvalidExcelFileException(Exception):
    """Raised when the upload cannot be opened as an Excel (.xlsx) workbook."""
    pass
class JSONReaderError(Exception):
    """Raised when sheet headers/values cannot be converted to JSON."""
    pass
class HeaderValueError(Exception):
    """Raised when a header cell is not a string."""
    pass
class StringTypeRequiredError(Exception):
    """Raised by enforce_string_type for values it refuses to stringify."""
    pass
class WorkbookJSONError(Exception):
    """User-facing wrapper raised by get_workbook for any upload failure."""
    pass
class IteratorJSONReader(object):
    """Turn an iterable of rows (first row = headers) into JSON-ish dicts.

    Header syntax drives the output structure:
      - ``"data: key"``  -> nested dict   ``{'data': {'key': ...}}``
      - ``"user 1"``     -> list          ``{'user': [...]}``
      - ``"is-ok?"``     -> boolean       (cell must be yes/no/true/false)

    >>> def normalize(it):
    ...     r = []
    ...     for row in IteratorJSONReader(it):
    ...         r.append(sorted(row.items()))
    ...     return r
    >>> normalize([])
    []
    >>> normalize([['A', 'B', 'C'], ['1', '2', '3']])
    [[('A', '1'), ('B', '2'), ('C', '3')]]
    >>> normalize([['A', 'data: key', 'user 1', 'user 2', 'is-ok?'],
    ...            ['1', '2', '3', '4', 'yes']])
    [[('A', '1'), ('data', {'key': '2'}), ('is-ok', True), ('user', ['3', '4'])]]
    """
    def __init__(self, rows):
        # you can only call __iter__ once
        self._rows = iter(rows)
        try:
            # The first row supplies the headers for every following row.
            self.headers = list(next(self._rows))
        except StopIteration:
            self.headers = []
        self.fieldnames = self.get_fieldnames()
    def row_to_json(self, row):
        """Convert one data row into a dict keyed by the parsed headers."""
        obj = {}
        for value, header in zip(row, self.headers):
            self.set_field_value(obj, header, value)
        return obj
    def __iter__(self):
        try:
            for row in self._rows:
                yield self.row_to_json(row)
        finally:
            # Drop the iterator: a second __iter__ now raises AttributeError
            # instead of silently yielding nothing.
            del self._rows
    def get_fieldnames(self):
        """Return the top-level field names implied by the header row.

        :raises HeaderValueError: if any header cell is not a string
        """
        obj = {}
        for field, value in zip(self.headers, [''] * len(self.headers)):
            if not isinstance(field, str):
                raise HeaderValueError('Field %s is not a string.' % field)
            self.set_field_value(obj, field, value)
        return list(obj)
    @classmethod
    def set_field_value(cls, obj, field, value):
        """Parse *field*'s syntax and store *value* into *obj* accordingly.

        Control flow is EAFP: each split attempt either succeeds (handled in
        the ``else`` branch) or raises, falling through to the next syntax.
        """
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        if isinstance(value, str):
            value = value.strip()
        # try dict
        try:
            field, subfield = field.split(':')
        except Exception:
            pass
        else:
            field = field.strip()
            if field not in obj:
                obj[field] = {}
            cls.set_field_value(obj[field], subfield, value)
            return
        # try list  ("name <index>"; more than two tokens raises and is
        # treated as a flat field)
        try:
            field, _ = field.split()
        except Exception:
            pass
        else:
            # Recurse on the bare name so "user 1?" etc. still parse.
            dud = {}
            cls.set_field_value(dud, field, value)
            (field, value), = list(dud.items())
            if field not in obj:
                obj[field] = []
            elif not isinstance(obj[field], list):
                obj[field] = [obj[field]]
            if value not in (None, ''):
                obj[field].append(value)
            return
        # else flat
        # try boolean
        try:
            field, nothing = field.split('?')
            assert(nothing.strip() == '')
        except Exception:
            pass
        else:
            try:
                value = {
                    'yes': True,
                    'true': True,
                    'no': False,
                    'false': False,
                    '': False,
                    None: False,
                }[value.lower() if hasattr(value, 'lower') else value]
            except KeyError:
                raise JSONReaderError(
                    'Values for field %s must be "yes" or "no", not "%s"' % (
                        field, value)
                )
        # set for any flat type
        field = field.strip()
        if field in obj:
            raise JSONReaderError(
                'You have a repeat field: %s' % field
            )
        obj[field] = value
def get_workbook(file_or_filename):
    """Open *file_or_filename* as a :class:`WorkbookJSONReader`.

    Translates the lower-level parsing exceptions into a single
    ``WorkbookJSONError`` with a translated, user-facing message.

    :raises WorkbookJSONError: on any recognized failure mode
    """
    try:
        return WorkbookJSONReader(file_or_filename)
    except HeaderValueError as e:
        # BUGFIX: HeaderValueError used to be caught by the file-format
        # clause below, which made this data-type message unreachable.
        raise WorkbookJSONError(_(
            "Upload encountered a data type error: {}."
        ).format(e))
    except InvalidExcelFileException as e:
        raise WorkbookJSONError(_(
            "Upload failed! "
            "Please make sure you are using a valid Excel 2007 or later (.xlsx) file. "
            "Error details: {}."
        ).format(e))
    except JSONReaderError as e:
        raise WorkbookJSONError(_(
            "Upload failed due to a problem with Excel columns. Error details: {}."
        ).format(e))
    except AttributeError as e:
        raise WorkbookJSONError(_(
            "Error processing Excel file: {}."
        ).format(e))
def get_single_worksheet(file_or_filename, title=None):
    """Return one worksheet from an uploaded workbook.

    :param title: sheet name to fetch; ``None`` means the first sheet
    :raises WorkbookJSONError: if the workbook is invalid or the sheet is
        missing (message depends on whether a title was requested)
    """
    workbook = get_workbook(file_or_filename)
    try:
        worksheet = workbook.get_worksheet(title=title)
    except WorksheetNotFound:
        # BUGFIX: corrected user-facing typo "contian" -> "contain".
        raise WorkbookJSONError(_(
            "Could not find sheet '{title}'."
        ).format(title=title) if title else _("Uploaded file does not contain any sheets."))
    return worksheet
class WorksheetNotFound(Exception):
    """Raised when a requested worksheet (by title or index) is absent."""

    def __init__(self, title):
        super().__init__()
        # Stash the requested title/index so callers can build their message.
        self.title = title
class WorksheetJSONReader(IteratorJSONReader):
    """IteratorJSONReader over an openpyxl worksheet's rows."""
    def __init__(self, worksheet, title=None):
        """
        :param worksheet: an openpyxl worksheet (read-only mode expected)
        :param title: sheet title, stored for error reporting
        """
        width = 0
        self.title = title
        self.worksheet = worksheet
        try:
            header_row = next(self.worksheet.iter_rows())
        except StopIteration:
            header_row = []
        # Count header cells up to the first empty one; this fixes the
        # effective column width used for every data row below.
        for cell in header_row:
            if cell.value is None:
                break
            else:
                width += 1
        self.worksheet.calculate_dimension(force=True)
        def iterator():
            def _convert_float(value):
                """
                excel doesn't distinguish between 1 and 1.0
                if it can be an integer assume it is
                """
                if isinstance(value, float) and int(value) == value:
                    return int(value)
                else:
                    # Specifically check for None so that we can allow a value of 0
                    return value if value is not None else ''
            # Restarts from row 1, so the first yielded row is the header
            # row, which the IteratorJSONReader base class consumes.
            for row in self.worksheet.iter_rows():
                cell_values = [
                    _convert_float(cell.value)
                    for cell in row[:width]
                ]
                # Stop at the first fully-empty row (within the header width).
                if not any(cell != '' for cell in cell_values):
                    break
                yield cell_values
        super(WorksheetJSONReader, self).__init__(iterator())
class WorkbookJSONReader(object):
    """Opens an .xlsx workbook and wraps every sheet in a WorksheetJSONReader."""

    def __init__(self, file_or_filename):
        """
        :param file_or_filename: a filesystem path, or an uploaded/binary
            file object.  File objects are spooled to a named temp file so
            openpyxl can open them by path.
            NOTE(review): the temp file is created with ``delete=False`` and
            never removed; the read-only workbook reads from it lazily, so it
            cannot safely be deleted here — confirm cleanup happens elsewhere.
        :raises InvalidExcelFileException: if openpyxl cannot open the file
        :raises JSONReaderError: if a sheet has unrecognized formatting
        """
        check_types = (UploadedFile, io.RawIOBase, io.BufferedIOBase)
        if isinstance(file_or_filename, check_types):
            tmp = NamedTemporaryFile(mode='wb', suffix='.xlsx', delete=False)
            file_or_filename.seek(0)
            tmp.write(file_or_filename.read())
            file_or_filename.seek(0)
            tmp.close()
            file_or_filename = tmp.name
        try:
            self.wb = openpyxl.load_workbook(file_or_filename, read_only=True, data_only=True)
        except (BadZipfile, InvalidFileException, KeyError) as e:
            raise InvalidExcelFileException(str(e))
        self.worksheets_by_title = {}
        self.worksheets = []
        for worksheet in self.wb.worksheets:
            try:
                ws = WorksheetJSONReader(worksheet, title=worksheet.title)
            except IndexError:
                raise JSONReaderError('This Excel file has unrecognised formatting. Please try downloading '
                                      'the lookup table first, and then add data to it.')
            self.worksheets_by_title[worksheet.title] = ws
            self.worksheets.append(ws)

    def get_worksheet(self, title=None, index=None):
        """Fetch one sheet by title or by 0-based index (default: first sheet).

        :raises TypeError: if both title and index are given
        :raises WorksheetNotFound: if no matching sheet exists
        """
        if title is not None and index is not None:
            raise TypeError("Can only get worksheet by title *or* index")
        if title:
            try:
                return self.worksheets_by_title[title]
            except KeyError:
                raise WorksheetNotFound(title=title)
        elif index is not None:
            # BUGFIX: this was ``elif index:``, which sent index=0 down the
            # default branch below (same sheet, but only by coincidence).
            try:
                return self.worksheets[index]
            except IndexError:
                raise WorksheetNotFound(title=index)
        else:
            try:
                return self.worksheets[0]
            except IndexError:
                raise WorksheetNotFound(title=0)
def flatten_json_to_path(obj, path=()):
    """Depth-first walk of nested dicts/lists, yielding ``(path, leaf)`` pairs.

    Dict keys and list indices are appended to *path* as tuple elements;
    anything that is neither a dict nor a list is treated as a leaf value.
    """
    if isinstance(obj, dict):
        for key, child in obj.items():
            yield from flatten_json_to_path(child, path + (key,))
    elif isinstance(obj, list):
        for index, child in enumerate(obj):
            yield from flatten_json_to_path(child, path + (index,))
    else:
        yield (path, obj)
def format_header(path, value):
    """Render a flattened ``(path, value)`` pair as a sheet header and cell.

    String path segments become ``": segment"`` suffixes, integer segments
    become 1-based ``" N"`` suffixes, and boolean values turn the header into
    a ``"...?"`` column with a yes/no cell.
    """
    pieces = [path[0]]
    for segment in path[1:]:
        if isinstance(segment, str):
            pieces.append(f': {segment}')
        elif isinstance(segment, int):
            pieces.append(f' {segment + 1}')
    header = ''.join(pieces)
    if isinstance(value, bool):
        header += '?'
        value = 'yes' if value else 'no'
    return header, value
def flatten_json(obj):
    """Yield a ``(header, value)`` pair for every leaf of a nested structure."""
    for path, leaf in flatten_json_to_path(obj):
        yield format_header(path, leaf)
def json_to_headers(obj):
    """Return the spreadsheet headers for *obj*, in human (alphanumeric) order."""
    flattened = sorted(flatten_json(obj), key=lambda pair: alphanumeric_sort_key(pair[0]))
    return [header for header, _unused in flattened]
def alphanumeric_sort_key(key):
    """Return a sort key that orders strings the way humans expect.

    Runs of digits compare numerically, so e.g. ``'item2' < 'item10'``.
    Thanks to http://stackoverflow.com/a/2669120/240553

    :param key: the string to build a key for
    :return: a list of alternating str and int chunks
    """
    import re

    def _convert(chunk):
        # Digit runs are captured by the regex group and compared as ints.
        return int(chunk) if chunk.isdigit() else chunk

    # Replaces the original lambda-assignment (PEP 8 E731) with a local def.
    return [_convert(chunk) for chunk in re.split(r'([0-9]+)', key)]
def enforce_string_type(value):
    """Return *value* as a string, converting ints but nothing else.

    :raises StringTypeRequiredError: for any non-str, non-int value —
        decimal types have no single obvious string form, so we refuse to
        guess.
    """
    if not isinstance(value, (str, int)):
        raise StringTypeRequiredError()
    return value if isinstance(value, str) else str(value)
| |
from __future__ import unicode_literals
import unittest
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import (User, UserProfile, UserStat, UserStatResult, StatDetails,
AdvancedUserStat, Image, Product, Parent1, Parent2, Child1, Child2, Child3,
Child4)
class ReverseSelectRelatedTestCase(TestCase):
    def setUp(self):
        """Create two users with chained one-to-one rows (profile, stat,
        stat details) plus the Parent/Child multi-table-inheritance fixtures
        used by the select_related tests below."""
        user = User.objects.create(username="test")
        UserProfile.objects.create(user=user, state="KS", city="Lawrence")
        results = UserStatResult.objects.create(results='first results')
        userstat = UserStat.objects.create(user=user, posts=150,
                                           results=results)
        StatDetails.objects.create(base_stats=userstat, comments=259)
        user2 = User.objects.create(username="bob")
        results2 = UserStatResult.objects.create(results='moar results')
        advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5,
                                                  results=results2)
        StatDetails.objects.create(base_stats=advstat, comments=250)
        # "Only Parent1" deliberately has no child row (see test_parent_only).
        p1 = Parent1(name1="Only Parent1")
        p1.save()
        c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1)
        c1.save()
        p2 = Parent2(name2="Child2 Parent2")
        p2.save()
        c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2)
        c2.save()
    def test_basic(self):
        """A reverse one-to-one named in select_related is fetched with the
        base row in a single query."""
        with self.assertNumQueries(1):
            u = User.objects.select_related("userprofile").get(username="test")
            self.assertEqual(u.userprofile.state, "KS")
    def test_follow_next_level(self):
        """select_related can traverse a reverse 1-1 and then a forward FK
        ("userstat__results") in one query."""
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat__results").get(username="test")
            self.assertEqual(u.userstat.posts, 150)
            self.assertEqual(u.userstat.results.results, 'first results')
    def test_follow_two(self):
        """Two sibling reverse 1-1 relations can be selected together."""
        with self.assertNumQueries(1):
            u = User.objects.select_related("userprofile", "userstat").get(username="test")
            self.assertEqual(u.userprofile.state, "KS")
            self.assertEqual(u.userstat.posts, 150)
    def test_follow_two_next_level(self):
        """Two second-level paths branching from the same reverse 1-1 still
        resolve in a single query."""
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test")
            self.assertEqual(u.userstat.results.results, 'first results')
            self.assertEqual(u.userstat.statdetails.comments, 259)
    def test_forward_and_back(self):
        """Forward FK followed by a reverse 1-1 ("user__userprofile") works;
        the back-reference to userstat is also satisfied without a query."""
        with self.assertNumQueries(1):
            stat = UserStat.objects.select_related("user__userprofile").get(user__username="test")
            self.assertEqual(stat.user.userprofile.state, 'KS')
            self.assertEqual(stat.user.userstat.posts, 150)
    def test_back_and_forward(self):
        """Round-tripping reverse then forward (u.userstat.user) adds no
        extra query."""
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat").get(username="test")
            self.assertEqual(u.userstat.user.username, 'test')
    def test_not_followed_by_default(self):
        """A bare select_related() does not follow reverse 1-1 relations, so
        accessing userstat costs a second query."""
        with self.assertNumQueries(2):
            u = User.objects.select_related().get(username="test")
            self.assertEqual(u.userstat.posts, 150)
def test_follow_from_child_class(self):
    """Querying the inherited child model can follow both FK and reverse one-to-one."""
    with self.assertNumQueries(1):
        adv_stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200)
        self.assertEqual(adv_stat.statdetails.comments, 250)
        self.assertEqual(adv_stat.user.username, 'bob')
def test_follow_inheritance(self):
    """The implicit parent->child inheritance link behaves like a reverse one-to-one."""
    with self.assertNumQueries(1):
        stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200)
        child = stat.advanceduserstat
        self.assertEqual(child.posts, 200)
        self.assertEqual(stat.user.username, 'bob')
        self.assertEqual(child.user.username, 'bob')
def test_nullable_relation(self):
    """select_related() on a nullable FK keeps rows whose relation is NULL."""
    image = Image.objects.create(name="imag1")
    with_image = Product.objects.create(name="Django Plushie", image=image)
    without_image = Product.objects.create(name="Talking Django Plushie")
    with self.assertNumQueries(1):
        products = sorted(Product.objects.select_related("image"), key=lambda product: product.name)
        self.assertEqual([product.name for product in products], ["Django Plushie", "Talking Django Plushie"])
        self.assertEqual(with_image.image, image)
        # Check for ticket #13839
        self.assertIsNone(without_image.image)
def test_missing_reverse(self):
    """
    Ticket #13839: select_related() should NOT cache None
    for missing objects on a reverse 1-1 relation.
    """
    with self.assertNumQueries(1):
        fetched = User.objects.select_related('userprofile').get(username='bob')
    with self.assertRaises(UserProfile.DoesNotExist):
        fetched.userprofile
def test_nullable_missing_reverse(self):
    """
    Ticket #13839: select_related() should NOT cache None
    for missing objects on a reverse 0-1 relation.
    """
    Image.objects.create(name="imag1")
    with self.assertNumQueries(1):
        fetched = Image.objects.select_related('product').get()
    with self.assertRaises(Product.DoesNotExist):
        fetched.product
def test_parent_only(self):
    """A parent with no child row: absence is cached, access raises without a new query."""
    with self.assertNumQueries(1):
        parent = Parent1.objects.select_related('child1').get(name1="Only Parent1")
    with self.assertNumQueries(0):
        with self.assertRaises(Child1.DoesNotExist):
            parent.child1
def test_multiple_subclass(self):
    """A multiply-inheriting child is reachable through select_related on one parent."""
    with self.assertNumQueries(1):
        parent = Parent1.objects.select_related('child1').get(name1="Child1 Parent1")
        self.assertEqual(parent.child1.name2, 'Child1 Parent2')
def test_onetoone_with_subclass(self):
    """An explicit one-to-one to a subclassed model is followed in the same query."""
    with self.assertNumQueries(1):
        parent = Parent2.objects.select_related('child2').get(name2="Child2 Parent2")
        self.assertEqual(parent.child2.name1, 'Child2 Parent1')
def test_onetoone_with_two_subclasses(self):
    """Chained subclass links (child2 -> child3) follow in one query; absence still raises."""
    with self.assertNumQueries(1):
        parent = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2")
        self.assertEqual(parent.child2.name1, 'Child2 Parent1')
        with self.assertRaises(Child3.DoesNotExist):
            parent.child2.child3
    parent3 = Parent2(name2="Child3 Parent2")
    parent3.save()
    child3 = Child3(name1="Child3 Parent1", parent2=parent3, value=2, value3=3)
    child3.save()
    with self.assertNumQueries(1):
        parent = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2")
        self.assertEqual(parent.child2.name1, 'Child3 Parent1')
        grandchild = parent.child2.child3
        self.assertEqual(grandchild.value3, 3)
        self.assertEqual(grandchild.value, parent.child2.value)
        self.assertEqual(parent.child2.name1, grandchild.name1)
def test_multiinheritance_two_subclasses(self):
    """Multiple-inheritance descent (child1 -> child4) resolves fully in one query."""
    with self.assertNumQueries(1):
        parent = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1")
        self.assertEqual(parent.child1.name2, 'Child1 Parent2')
        self.assertEqual(parent.child1.name1, parent.name1)
        with self.assertRaises(Child4.DoesNotExist):
            parent.child1.child4
    Child4(name1='n1', name2='n2', value=1, value4=4).save()
    with self.assertNumQueries(1):
        parent = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2")
        self.assertEqual(parent.name2, 'n2')
        child = parent.child1
        self.assertEqual(child.name1, 'n1')
        self.assertEqual(child.name2, parent.name2)
        self.assertEqual(child.value, 1)
        grandchild = child.child4
        self.assertEqual(grandchild.name1, child.name1)
        self.assertEqual(grandchild.name2, child.name2)
        self.assertEqual(grandchild.value, child.value)
        self.assertEqual(grandchild.value4, 4)
@unittest.expectedFailure
def test_inheritance_deferred(self):
    """Deferred fields combined with inherited select_related (known to fail)."""
    created = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)

    def fetch():
        # repeated queryset: only id2 and the related child's value are loaded
        return Parent2.objects.select_related('child1').only(
            'id2', 'child1__value').get(name2="n2")

    with self.assertNumQueries(1):
        parent = fetch()
        self.assertEqual(parent.id2, created.id2)
        self.assertEqual(parent.child1.value, 1)
    parent = fetch()
    with self.assertNumQueries(1):
        self.assertEqual(parent.name2, 'n2')
    parent = fetch()
    with self.assertNumQueries(1):
        self.assertEqual(parent.child1.name2, 'n2')
@unittest.expectedFailure
def test_inheritance_deferred2(self):
    """Deferred fields across two inheritance hops (known to fail)."""
    created = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
    qs = Parent2.objects.select_related('child1', 'child4').only(
        'id2', 'child1__value', 'child1__child4__value4')
    with self.assertNumQueries(1):
        parent = qs.get(name2="n2")
        self.assertEqual(parent.id2, created.id2)
        self.assertEqual(parent.child1.value, 1)
        self.assertEqual(parent.child1.child4.value4, 4)
        self.assertEqual(parent.child1.child4.id2, created.id2)
    parent = qs.get(name2="n2")
    with self.assertNumQueries(1):
        self.assertEqual(parent.child1.name2, 'n2')
    parent = qs.get(name2="n2")
    with self.assertNumQueries(1):
        self.assertEqual(parent.child1.name1, 'n1')
    with self.assertNumQueries(1):
        self.assertEqual(parent.child1.child4.name1, 'n1')
class ReverseSelectRelatedValidationTests(TestCase):
    """
    Reverse related fields should be listed in the validation message when an
    invalid field is given in select_related().
    """
    non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s"
    invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s"

    def test_reverse_related_validation(self):
        """Bogus and non-relational field names both raise FieldError listing the reverse choices."""
        valid_choices = 'userprofile, userstat'
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('foobar', valid_choices)):
            list(User.objects.select_related('foobar'))
        with self.assertRaisesMessage(FieldError, self.non_relational_error % ('username', valid_choices)):
            list(User.objects.select_related('username'))
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to build and run the tensorflow graph for the sequence-to-sequence model"""
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import data
FLAGS = tf.app.flags.FLAGS
class Discriminator(object):
    """LSTM-based discriminator used during adversarial training.

    NOTE(review): the original docstring (apparently copied from the
    generator) called this a sequence-to-sequence summarization model, but
    the graph below is a single LSTM language model over token sequences:
    per-token cross-entropy against ``_target_batch`` serves both as the
    training loss (weighted by ``label``) and as the discrimination score
    returned by ``run_ypred_auc``.
    """

    def __init__(self, hps, vocab):
        # hps: hyperparameter bundle (batch_size, max_enc_sen_num,
        # max_enc_seq_len, emb_dim, hidden_dim, lr, ...).
        # vocab: vocabulary object exposing size().
        # Graph construction is deferred to build_graph().
        self._hps = hps
        self._vocab = vocab

    def _add_placeholders(self):
        """Add placeholders to the graph. These are entry points for any input data."""
        hps = self._hps
        # All batched tensors are flattened to (batch_size * max_enc_sen_num)
        # rows, i.e. one row per sentence.
        # encoder part
        # NOTE(review): the tf name 'enc_batch' is reused for _dec_batch
        # below; TF uniquifies duplicates, so graph names diverge from intent.
        self._target_batch = tf.placeholder(tf.int32, [hps.batch_size* hps.max_enc_sen_num, hps.max_enc_seq_len], name='enc_batch')
        #self._target_lens = tf.placeholder(tf.int32, [hps.batch_size* hps.max_enc_sen_num], name='enc_lens')
        self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size * hps.max_enc_sen_num, hps.max_enc_seq_len], name='enc_batch')
        self._dec_lens = tf.placeholder(tf.int32, [hps.batch_size * hps.max_enc_sen_num], name='enc_lens')
        #self._enc_sen_lens = tf.placeholder(tf.int32, [hps.batch_size * hps.], name='enc_sen_lens')
        # padding mask fed from batch.dec_padding_mask (see _make_feed_dict);
        # presumably 1.0 for real tokens, 0.0 for padding -- TODO confirm
        self._target_mask = tf.placeholder(tf.float32,
                                           [hps.batch_size* hps.max_enc_sen_num, hps.max_enc_seq_len],
                                           name='target_mask')
        #self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='enc_padding_mask')
        # NOTE(review): _decay is fed by run_train_step but is never consumed
        # anywhere in the graph (the Adagrad learning rate comes from hps.lr).
        self._decay = tf.placeholder(tf.float32, name="decay_learning_rate")
        # per-token positive/negative weighting multiplied into the loss
        self.label = tf.placeholder(tf.float32, [hps.batch_size * hps.max_enc_sen_num, hps.max_enc_seq_len], name="positive_negtive")
        #self._target_batch = tf.placeholder(tf.int32,
        #                                    [hps.batch_size* hps.max_enc_sen_num],
        #                                    name='target_batch')

    def _make_feed_dict(self, batch):
        """Map one Batch object's arrays onto the graph placeholders."""
        feed_dict = {}
        feed_dict[self._target_batch] = batch.target_batch
        feed_dict[self._dec_batch] = batch.dec_batch
        feed_dict[self._dec_lens] = batch.dec_sen_lens
        feed_dict[self.label] = batch.labels
        #feed_dict[self._enc_sen_lens] = batch.enc_sen_lens
        #feed_dict[self._enc_padding_mask] = batch.enc_padding_mask
        feed_dict[self._target_mask] = batch.dec_padding_mask
        #feed_dict[self.label] = batch.labels
        return feed_dict

    def _build_model(self):
        """Add the whole sequence-to-sequence model to the graph."""
        hps = self._hps
        vsize = self._vocab.size()  # size of the vocabulary
        with tf.variable_scope('discriminator'):
            # Some initializers
            self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag,
                                                                seed=123)
            self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)
            # Add embedding matrix (shared by the encoder and decoder inputs)
            with tf.variable_scope('embedding'):
                embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32,
                                            initializer=self.trunc_norm_init)
                emb_dec_inputs = tf.nn.embedding_lookup(embedding,
                                                        self._dec_batch)  # tensor with shape (batch_size, max_enc_steps, emb_size)
                self.emb_enc_inputs = emb_dec_inputs
            ## Add the encoder.
            #encoder_vector = self._add_encoder(emb_enc_inputs, self._enc_lens, hps)
            with tf.variable_scope('output_projection'):
                # projection from LSTM hidden state to vocabulary logits
                w = tf.get_variable('w_output', [hps.hidden_dim, vsize], dtype=tf.float32,
                                    initializer=self.trunc_norm_init)
                v = tf.get_variable('v_output', [vsize], dtype=tf.float32, initializer=self.trunc_norm_init)
            with tf.variable_scope('decoder'):
                # When decoding, use model output from the previous step
                # for the next step.
                loop_function = None
                cell = tf.contrib.rnn.LSTMCell(
                    hps.hidden_dim,
                    initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
                    state_is_tuple=False)
                #tf.logging.info(emb_dec_inputs)
                # rnn_decoder expects a time-major list of (rows, emb) tensors
                emb_dec_inputs = tf.unstack(emb_dec_inputs, axis=1)
                self._dec_in_state = cell.zero_state(FLAGS.batch_size* hps.max_enc_sen_num, tf.float32)
                # tf.logging.info(self._dec_in_state)
                # tf.logging.info(emb_dec_inputs)
                decoder_outputs, self._dec_out_state = tf.contrib.legacy_seq2seq.rnn_decoder(
                    emb_dec_inputs,self._dec_in_state,
                    cell, loop_function=None
                )
                # back to batch-major, flatten to rows of hidden_dim, project
                # to logits, then restore (rows, time, vocab) shape
                decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2])
                decoder_outputs = tf.reshape(decoder_outputs,
                                             [-1,
                                              hps.hidden_dim])
                decoder_outputs = tf.nn.xw_plus_b(decoder_outputs, w, v)
                decoder_outputs = tf.reshape(decoder_outputs,
                                             [hps.batch_size * hps.max_enc_sen_num, hps.max_enc_seq_len,
                                              FLAGS.vocab_size])
                '''crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=self._target_batch, logits=decoder_outputs)
                self.out_put = tf.argmax(crossent, 1)
                self.out_put = tf.reshape(self.out_put, [hps.batch_size, hps.max_enc_sen_num])'''
                '''weights = self._target_mask * self.label
                self.train_loss = tf.contrib.seq2seq.sequence_loss(
                    decoder_outputs,
                    self._target_batch,
                    weights,
                    average_across_timesteps=True,
                    average_across_batch=True)'''
                # training loss: cross-entropy weighted by the padding mask
                # AND the per-token positive/negative label matrix
                weights = self._target_mask * self.label
                self.train_loss = tf.contrib.seq2seq.sequence_loss(
                    decoder_outputs,
                    self._target_batch,
                    weights,
                    average_across_timesteps=True,
                    average_across_batch=True)
                # unaveraged per-token loss (mask only, no labels), kept so it
                # can be reshaped into per-sentence scores below
                self.out_loss = tf.contrib.seq2seq.sequence_loss(
                    decoder_outputs,
                    self._target_batch,
                    self._target_mask,
                    average_across_timesteps=False,
                    average_across_batch=False)
                self.out_loss=tf.reshape(self.out_loss, [-1])
                #label=tf.reshape(self.label, [-1])
                #self.train_loss = tf.reduce_mean(self.out_loss)/(hps.batch_size*hps.max_enc_sen_num*hps.max_enc_seq_len)
                # (batch, sentence, token) per-token loss ...
                self.out_loss = tf.reshape(self.out_loss, [hps.batch_size, hps.max_enc_sen_num, hps.max_enc_seq_len])
                # ... and its mean over tokens = per-sentence score
                self.out_loss_sentence = tf.reduce_mean(self.out_loss,axis = -1)

    def _add_train_op(self):
        """Sets self._train_op, the op to run for training."""
        # Take gradients of the trainable variables w.r.t. the loss function to minimize
        loss_to_minimize = self.train_loss
        tvars = tf.trainable_variables()
        gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
        # clip by global norm to stabilize RNN training
        grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
        # Add a summary
        tf.summary.scalar('global_norm', global_norm)
        # Apply adagrad optimizer
        optimizer = tf.train.AdagradOptimizer(self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')

    def build_graph(self):
        """Add the placeholders, model, global step, train_op and summaries to the graph"""
        with tf.device("/gpu:" + str(FLAGS.gpuid)):
            tf.logging.info('Building graph...')
            t0 = time.time()
            self._add_placeholders()
            self._build_model()
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self._add_train_op()
            t1 = time.time()
            tf.logging.info('Time to build graph: %i seconds', t1 - t0)

    def run_train_step(self, sess, batch, decay=False):
        """Runs one training iteration. Returns a dictionary containing train op, summaries, loss, global_step and (optionally) coverage loss."""
        # NOTE(review): the decay feed has no effect -- _decay is unused in
        # the graph (see _add_placeholders).
        feed_dict = self._make_feed_dict(batch)
        feed_dict[self._decay] = 1.0
        if decay:
            feed_dict[self._decay] = 0.001
        to_return = {
            'train_op': self._train_op,
            'loss': self.train_loss,
            'out_loss': self.out_loss,
            'global_step': self.global_step,
        }
        return sess.run(to_return, feed_dict)

    def run_pre_train_step(self, sess, batch):
        """Runs one pre-training iteration; identical to run_train_step with decay disabled."""
        feed_dict = self._make_feed_dict(batch)
        feed_dict[self._decay] = 1.0
        to_return = {
            'train_op': self._train_op,
            'loss': self.train_loss,
            'out_loss': self.out_loss,
            'global_step': self.global_step,
        }
        return sess.run(to_return, feed_dict)

    def run_ypred_auc(self, sess, batch):
        """Score a batch without training: per-token and per-sentence cross-entropy."""
        feed_dict = self._make_feed_dict(batch)
        to_return = {
            'y_pred_auc': self.out_loss,
            'y_pred_auc_sentence': self.out_loss_sentence
        }
        return sess.run(to_return, feed_dict)

    # dead code kept as a string literal by the original author
    '''def run_eval_step(self, sess, batch):
    """Runs one evaluation iteration. Returns a dictionary containing summaries, loss, global_step and (optionally) coverage loss."""
    feed_dict = self._make_feed_dict(batch)
    error_list =[]
    error_label = []
    to_return = {
        'predictions': self.out_put,
    }
    results = sess.run(to_return, feed_dict)
    right =0
    number =0
    output = results['predictions']
    for i in range(len(batch.labels)):
        if batch.target_mask[i] == 1:
            if results['predictions'][i] == batch.labels[i]:
                right +=1
            else:
                error_label.append(results['predictions'][i])
                error_list.append(batch.original_reviews[i])
            number+=1
    print (batch.labels)
    print (batch.target_mask)
    print (results['predictions'])
    print (right)
    print (number)
    print (error_label)
    print (error_list)
    return right, number,error_list,error_label'''
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: patscan.py
'''
wat.escort.patscan
@author: Andrew Philpot
@version 1.14
Usage: python patscan.py -c category -f family -i indicator tok1 tok2 tok3 ... tokN
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
\t-c, --category:\tmajor category of rules to apply (default: use all)
\t-f, --family:\tminor category of rules to apply (default: use all)
\t-i, --indicator, --indic:\ttest precise rule by specifying name as X.Y.Z
\t-t, --type\tfiletype (default: text)
'''
import sys
import re
from collections import defaultdict
import codecs
import pprint
import argparse
import json
from pymod.cstokensearcher import CaseSensitiveTokenSearcher as TokenSearcher
from pymod.util import echo, isString, isSequence
from wat.tool.wattok import Tokenizer
VERSION = '1.14'
__version__ = VERSION
REVISION = "$Revision: 22285 $".replace('$','')
APPLICATION = 'patscan'
# c:d:f:i:j:k:l:r:s:tv
CONFIG = 'esc509'
TEST = False
VERBOSE = False
# FROM KELLY
# Young
# New
# Tight
# In town for a limited time/just visiting
# No black males
# Older men only (gives an age range, etc)
# User has multiple associated ads, especially linked to other girls
# Associated with other ads that have a different age posted (by user or phone number)
# Two or three girl special
# Ask about my friend
# Posted in multiple cities
# Differing area codes in an ad
# Age listed on post
# Looks young (body or face)
# No face shot, not looking at camera, face blurred
# Looks like stock photos pulled from the internet or other ads
# More clothed
# Multiple victims (faces) in the photos
# Incalls only (helpful for juveniles, also helpful for conducting operations as teams may only be set up for one or the other)
byIndic = dict()
byCategory = defaultdict(lambda: list())
byFamily = dict()
def patternScan(tokenList, category=None, family=None, indicator=None):
    """Match registered Phrase patterns against tokenList.

    Selection precedence mirrors the CLI options: family (within category)
    first, then category, then indicator (an X.Y.Z string or a sequence),
    else every registered phrase.  Returns a list of dicts, one per phrase
    that matched, each carrying the phrase metadata and its matches.
    """
    def gen():
        """Yield the Phrase objects selected by the keyword filters."""
        if family:
            # .get() so an unregistered (category, family) pair yields
            # nothing instead of raising KeyError
            phrase = byFamily.get((category, family))
            if phrase:
                yield phrase
        elif category:
            # .get() avoids the defaultdict creating an empty entry for a
            # lookup-only (possibly misspelled) category
            for phrase in byCategory.get(category, []):
                yield phrase
        elif indicator:
            # coerce indicator to the registry's tuple key, e.g. "1.2.3" -> (1, 2, 3)
            if isSequence(indicator):
                key = tuple(indicator)
            elif isString(indicator):
                key = tuple([int(x) for x in indicator.split('.')])
            else:
                raise ValueError(indicator)
            phrase = byIndic.get(key)
            if phrase:
                yield phrase
        else:
            # no filter: every phrase in every category
            for cat in byCategory:
                for phrase in byCategory[cat]:
                    yield phrase
    ts = TokenSearcher(tokenList)
    result = list()
    for phrase in gen():
        matches = ts.findall(phrase.pattern)
        if matches:
            result.append({"category": phrase.category,
                           "family": phrase.family,
                           "indicator": phrase.indic,
                           "pattern": phrase.pattern,
                           "weight": phrase.weight,
                           "matches": matches})
    return result
class Phrase(object):
    """One weighted token-search pattern.

    Instances self-register into the module-level registries (byIndic,
    byCategory, byFamily) as a side effect of construction.
    """

    def __init__(self, indic, category, family, pattern, weight):
        """Would like this to be interned"""
        self.indic = indic
        self.category = category
        self.family = family
        self.pattern = pattern
        self.weight = weight
        self.index()

    def __str__(self):
        return "<Phrase %s: %s.%s>" % (self.indic, self.category, self.family)

    def __repr__(self):
        return str(self)

    def __unicode__(self):
        return str(self)

    def index(self):
        """Register this phrase under all three lookup keys."""
        byCategory[self.category].append(self)
        byFamily[(self.category, self.family)] = self
        byIndic[self.indic] = self

# expose the registries as class attributes for external lookup
Phrase.byIndic = byIndic
Phrase.byCategory = byCategory
Phrase.byFamily = byFamily
# Alternation sets used to build the rule regexes: each key maps to a
# '|'-joined list of surface-form synonyms/misspellings for that concept.
vocab = {
    "friend": "|".join(["FRIEND", "FREIND", "GIRLFRIEND", "GIRLFREIND", "GF", "FREND", "PLAYMATE"]),
    "friends": "|".join(["FRIENDS", "FREINDS", "GIRLFRIENDS", "GIRLFREINDS", "GFS", "FRENDS", "PLAYMATES"]),
    "two": "|".join(["2", "TWO"]),
    "both": "|".join(["BOTH", "ALL", "TWO", "THREE"]),
    "have": "|".join(["HAVE", "HAV", "CHOOSE", "EXPERIENCE", "ENJOY"]),
    "girls": "|".join(["GIRLS", "WOMEN", "LADIES", "GIRLZ", "GIRLZZ", "LADIEZ", "CHICKS", "GURLS", "GURLZ", "GURLZZ"]),
    "girl": "|".join(["GIRL", "WOMAN", "LADY", "CHICK", "GURL"]),
    "special": "|".join(["SHOW", "SPECIAL", "$PECIAL", "SPECAIL"]),
    "specials": "|".join(["SHOWS", "SPECIALS", "$PECIALS", "SPECAILS"]),
    "double": "|".join(["DOUBLE", "TWICE", "DBL", "DBBL", "DBLS", "DBBLS"]),
    "your": "|".join(["YOUR", "YER", "YOURE", "YOU 'RE"]),
    "pleasure": "|".join(["PLEASURE", "FUN", "EXCITING", "EXCITEMENT", "SEXY", "TREAT"]),
    "man": "|".join(["MAN", "MALE", "GENTLEMAN", "GENTELMAN", "GENTLMAN", "DUDE", "DATE", "GENT", "GUY", "BOY", "B0Y", "FELLOW", "FELLA", "PERSON"]),
    "men": "|".join(["MEN", "MALES", "GENTLEMEN", "GENTELMEN", "GENTLMEN", "DUDES", "DATES", "GENTS", "GUYS", "BOYS", "B0YS", "FELLOWS", "FELLAS", "PERSONS", "PEOPLE"]),
    "black": "|".join(["BLACK", "AFRICAN-AMERICAN", "AA", "AFRO-AMERICAN", "AFRICANAMERICA", "AFROAMERICAN", "BLK", "NEGRO", "NIGGER", "SPADE"]),
    "blacks": "|".join(["BLACKS", "BLACKMEN", "AFRICAN-AMERICANS", "AAS", "NEGROS", "NEGROES", "NIGGERS", "SPADES"]),
    "african": "|".join(["AFRICAN", "AFRO"]),
    "white": "|".join(["WHITE", "CAUCASIAN", "CAUCASION", "CRACKER", "HONKY", "HONKIE", "HONKEY"]),
    "whites": "|".join(["WHITES", "CAUCASIANS", "CAUCASIONS", "WHITEMEN", "CRACKERS", "HONKIES", "HONKEYS"]),
    "thug": "|".join(["THUG", "THUGG", "PIMP"]),
    "thugs": "|".join(["THUGS", "THUGGS", "PIMPS"]),
    "town": "|".join(["TOWN", "AREA", "CITY", "NEIGHBORHOOD"]),
    "new": "|".join(["NEW", "NU"]),
    "limited": "|".join(["LIMITED", "SHORT"]),
    "time": "|".join(["TIME", "VISIT", "WHILE"]),
    "few": "|".join(["FEW", "COUPLE"]),
    "num": "|".join(["2", "TWO", "TOW", "3", "THREE", "4", "FOUR", "5", "FIVE"]),
    "days": "|".join(["DAYS", "NIGHTS", "HOURS"]),
    "older": "|".join(["OLDER", "MATURE"]),
    "prefer": "|".join(["PREFER", "LOVE", "❤"]),
    "preferred": "|".join(["PREFERRED", "ESPECIALLY", "PLEASE"]),
    "a": "|".join(["A", "AN"]),
    ## 22 May 2013
    ## NOTE: Tokenization yields many tokens which include 'incall'
    ## etc. but also include other characters, such as general
    ## location, price, hours.  This is a good candidate for a rework
    ## where offsets and examineWindow would accept a regex rather
    ## than only a literal
    "incall": "|".join(["INCALL", "IN-CALL", "INCALLS", "IN-CALLS", "INCALLZ", "IN-CALLZ", "INS",
                        "INCAL", "INCALS",
                        "I/C", "IC"]),
    "outcall": "|".join(["OUTCALL", "OUT-CALL", "OUTCALLS", "OUT-CALLS", "OUTCALLZ", "OUT-CALLZ", "OUTCAL",
                         "0UTCALL", "0UT-CALL", "0UTCALLS", "0UT-CALLS", "0UTCALLZ", "0UT-CALLZ", "OUTCALS",
                         # OC can also mean Orange County so we don't want to use it in isolation
                         "O/C"]),
    "call": "|".join(["CALL", "CALLS", "CALLZ", "CALSS", "C@LL", "CAL", "CALS", "CALZ"]),
    "place": "|".join(["PLACE", "HOTEL", "HOME", "APARTMENT", "APT"]),
    "in": "|".join(["IN", "INS", "INZ"]),
    "out": "|".join(["OUT", "0UT", "OUTS", "0UTS", "OUTZ", "0UTZ"]),
    # descriptors for "location"
    "safe": "|".join(["SAFE", "CLEAN", "NICE", "UPSCALE", "COMFORTABLE", "PRIVATE"]),
    "discreet": "|".join(["DISCREET", "DISCRETE", "DESCREET", "DISCRET", "DESCRETE", "DISCREETE", "DISCRETO", "DESCRET", "DISCREAT", "DISCRETA", "DECREET", "DICREET", "DECRETE"]),
    "location": "|".join(["LOCATION", "RESIDENTIAL", "RESIDENCE"]),
    "honey": "|".join(["HONEY", "HON", "HUN"]),
    "punctuation": "|".join([",", "\\*", "!", ":"]),
    # punctuation/etc intended to indicate and/or
    "andor": "|".join([",", "/", "&", "AND", "OR", "~", "-", "--"]),
    ## 12 August 2013
    "asian": "|".join(["ASIAN", "ASAIN",
                       "CHINESE", "JAPANESE", "KOREAN", "THAI", "VIETNAMESE", "ORIENTAL",
                       "FILIPINO", "FILIPINA", "PHILIPINO", "PHILIPINA",
                       "MALAYSIAN"]),
    "latina": "|".join(["LATINA", "LATINO", "LATIN"]),
    # BUGFIX: the original was missing commas between "HONG" .. "JAPAN",
    # so implicit string concatenation produced the single useless
    # alternative "HONGKONGBEIJINGTAIWANTAIPEIJAPAN".
    "asia": "|".join(["ASIA", "CHINA", "HONG", "KONG", "BEIJING", "TAIWAN", "TAIPEI",
                      "JAPAN", "TOKYO", "OSAKA",
                      "KOREA", "SEOUL",
                      "THAILAND", "BANGKOK",
                      "VIETNAM",
                      "ORIENT",
                      "PHILIPPINES", "MANILA",
                      "MALAYSIA"]),
    # "spa": "|".join(["SPA", "MASSAGE"]),
    "spa": "|".join(["SPA"]),
    "therapy": "|".join(["MASSEUSE", "MASSEUSES", "THERAPY", "THERAPIST", "THERAPISTS", "THERAPEUTIC", "RUB", "SHOWER", "STAFF", "STAFFS"]),
    "shiatsu": "|".join(["SHIATSU", "SWEDISH", "TISSUE", "NURU", "REFLEXOLOGY"]),
    "massage": "|".join(["MASSAGE", "MASSEUSE", "MASSEUSES"]),
    # not agency
    # no agency
    # not an agency
    # no * agency
    #
    # not with an agency
    # not affiliated|associated with any|an agency
    # [do not work] for an agency
    # law enforcement[s] enforcment inforcement agency
    # by an agency
    "agency": "|".join(["AGENCY", "CLUB", "ESTABLISHMENT", "AGENCIA"]),
    "affiliated": "|".join(["AFFILIATED", "ASSOCIATED", "WORK", "EMPLOYED"]),
    "with": "|".join(["WITH", "BY"]),
    "law": "|".join(["LAW"]),
    # '%nf%c%nt%' is productive and reasonably targeted
    "enforcement": "|".join(["ENFORCEMENT", "ENFORCEMENTS", "ENFORCMENT", "INFORCEMENT",
                             "ENTRAP", "ENTRAPMENT", "ENRAPEMENT"]),
    # not with an agency
    # not affiliated|associated with any|an agency
    # [do not work] for an agency
    # law enforcement[s] enforcment inforcement agency
    "superbowl": "|".join(["superbowl", "meadowlands"]),
}
def v(*args):
    """Join the vocab alternation sets named by *args into one regex alternation."""
    return "|".join(vocab[name] for name in args)
## (r"(?V1)<(?-i)ABC> <(?-i)def>")
## SECTION 1: multi-provider
## Rules flagging ads that advertise more than one person.  Each Phrase
## self-registers; the final argument is the rule weight (1 = strong
## signal, fractional = weaker -- presumably combined downstream; verify).
# ask about my friend
Phrase((1,1,1), "multiProvider", "ask", r"""(?V1)<(?i)ASK> <(?i)ABOUT> <(?i)MY> <(?i).*>? <(?i)%s>""" % v('friend','friends'), 1)
# I have a friend
Phrase((1,1,2), "multiProvider", "i have", r"""(?V1)<(?i)I> <(?i)ALSO>? <(?i)%s> <(?i)%s>? <(?i).*>? (<(?i)%s>)""" % (v('have'), v('a'), v('friend','friends')), 1)
# me and my friend
Phrase((1,1,3), "multiProvider", "me and my", r"""(?V1)<(?i)ME> <(?i)AND> <(?i)MY> <(?i).*>? <(?i)%s>""" % v('friend','friends'), 1)
# with my friend
Phrase((1,1,4), "multiProvider", "with my", r"""(?V1)<(?i)WITH> <(?i)MY> <(?i).*>? <(?i)%s>""" % v('friend','friends'), 1)
# possessive: my friend ' s name is
Phrase((1,1,5), "multiProvider", "my friends name", r"""(?V1)<(?i)MY> <(?i)%s> <(?i)'S> <(?i)NAME>""" % v('friend'), 1)
# my friend(s), my cool friend, my really cool friend
Phrase((1,2,1), "multiProvider", "my friend", r"""(?V1)<(?i)MY> <(?i).*>{0,2} <(?i)%s>""" % v('friend', 'friends'), 0.5)
# my nice, cool friend(s)
Phrase((1,2,2), "multiProvider", "my x,y friend", r"""(?V1)<(?i)MY> <(?i).*> <(?i),>, <(?i).*> <(?i)%s>""" % v('friend', 'friends'), 0.5)
# two/2 girl shows/specials
Phrase((1,3,1), "multiProvider", "two girl show", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)%s>""" % (v('num'), v('girl'), v('special', 'specials')), 1)
# have both of us
Phrase((1,4,1), "multiProvider", "have both of us", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)OF> <(?i)US>""" % (v('have'), v('both')), 1)
# both girls for [$]
Phrase((1,5,1), "multiProvider", "both girls for", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)FOR>""" % (v('both'), v('girls','friends')), 1)
# [$] for both girls
Phrase((1,5,2), "multiProvider", "for both girls", r"""(?V1)<(?i)FOR> <(?i)%s> <(?i)%s>""" % (v('both'), v('girls','friends')), 1)
# double {the} pleasure
Phrase((1,6,1), "multiProvider", "double pleasure", r"""(?V1)<(?i)%s> <(?i)THE>? <(?i)%s>""" % (v('double'), v('pleasure')), 1)
# double your pleasure
Phrase((1,6,2), "multiProvider", "double your pleasure", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)%s>""" % (v('double'), v('your'), v('pleasure')), 1)
# double you're (sic) pleasure
Phrase((1,6,3), "multiProvider", "double you're pleasure", r"""(?V1)<(?i)%s> <(?i)YOU> <(?i)'RE> <(?i)%s>""" % (v('double'), v('pleasure')), 1)
# twice as exciting
Phrase((1,7,1), "multiProvider", "twice as exciting", r"""(?V1)<(?i)%s> <(?i)AS> <(?i)%s>""" % (v('double'), v('pleasure')), 1)
# two times as exciting, 2 times the pleasure
Phrase((1,7,2), "multiProvider", "two times as exciting", r"""(?V1)<(?i)%s> <(?i)TIMES> <(?i)AS|THE> <(?i)%s>""" % (v('num'), v('pleasure')), 1)
# duo (any context)
Phrase((1,8,1), "multiProvider", "duo", r"""(?V1)<(?i)DUO>""", 1)
## SECTION 2: RACE/ETHNIC SELECTION
## Rules flagging explicit racial/ethnic client screening in ad text.
# no black men
Phrase((2,1,1), "raceEthnicSelect", "no black men", r"""(?V1)<(?i)NO> <(?i)%s> <(?i)%s>""" % (v('black'), v('man', 'men')), 1)
# no african american men
Phrase((2,1,2), "raceEthnicSelect", "no african american men", r"""(?V1)<(?i)NO> <(?i)%s> <(?i)AMERICAN> <(?i)%s>""" % (v('african'), v('man', 'men')), 1)
# no african-american men
Phrase((2,1,3), "raceEthnicSelect", "no african-american men", r"""(?V1)<(?i)NO> <(?i)%s> <(?i)-> <(?i)AMERICAN> <(?i)%s>""" % (v('african'), v('man', 'men')), 1)
# no blacks
Phrase((2,1,4), "raceEthnicSelect", "no blacks", r"""(?V1)<(?i)NO> <(?i)%s>""" % (v('blacks')), 1)
Phrase((2,1,5), "raceEthnicSelect", "no thugs", r"""(?V1)<(?i)NO> <(?i)%s>""" % (v('thugs')), 1)
# white men only
Phrase((2,2,1), "raceEthnicSelect", "white men only", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)ONLY>""" % (v('white'), v('man', 'men')), 1)
# only white men, only date/see white men
Phrase((2,2,2), "raceEthnicSelect", "only white men", r"""(?V1)<(?i)ONLY> <(?i)DATE|SEE>? <(?i)%s> <(?i)%s>""" % (v('white'), v('man', 'men')), 1)
# whites only
Phrase((2,2,3), "raceEthnicSelect", "whites only", r"""(?V1)<(?i)%s> <(?i)ONLY>""" % (v('whites')), 1)
# only whites, only date/see whites
Phrase((2,2,4), "raceEthnicSelect", "only whites", r"""(?V1)<(?i)ONLY> <(?i)DATE|SEE>? <(?i)%s>""" % (v('whites')), 1)
# white men preferred
Phrase((2,3,1), "raceEthnicSelect", "white men preferred", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)%s>""" % (v('white'), v('man', 'men'), v('preferred')), 1)
# prefer white men, prefer to date/see white men
Phrase((2,3,2), "raceEthnicSelect", "prefer white men", r"""(?V1)<(?i)%s> <(?i)TO>? <(?i)DATE|SEE>? <(?i)%s> <(?i)%s>""" % (v('prefer'), v('white'), v('man', 'men')), 1)
# whites preferred
Phrase((2,3,3), "raceEthnicSelect", "whites preferred", r"""(?V1)<(?i)%s> <(?i)%s>""" % (v('whites'), v('preferred')), 1)
# prefer whites, prefer to date/see whites
Phrase((2,3,4), "raceEthnicSelect", "prefer whites", r"""(?V1)<(?i)%s> <(?i)TO>? <(?i)DATE|SEE|SERVICE|SERVE>? <(?i)%s>""" % (v('prefer'), v('whites')), 1)
### I don't service blacks, do not date blacks
Phrase((2,4,1), "raceEthnicSelect", "do not see blacks", r"""(?V1)<(?i)DO> <(?i)NOT> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s>""" % (v('blacks')), 1)
Phrase((2,4,2), "raceEthnicSelect", "do not see black men", r"""(?V1)<(?i)DO> <(?i)NOT> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s> <(?i)%s>""" % (v('black'), v('man', 'men')), 1)
Phrase((2,4,3), "raceEthnicSelect", "dont see blacks", r"""(?V1)<(?i)DO> <(?i)N'T> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s>""" % (v('blacks')), 1)
Phrase((2,4,4), "raceEthnicSelect", "dont see black men", r"""(?V1)<(?i)DO> <(?i)N'T> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s> <(?i)%s>""" % (v('black'), v('man', 'men')), 1)
Phrase((2,4,5), "raceEthnicSelect", "do not see thugs", r"""(?V1)<(?i)DO> <(?i)NOT> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s>""" % (v('thugs')), 1)
# BUGFIX: (2,4,6) previously repeated the <NOT> pattern of (2,4,5); the
# "dont" family matches the contraction token <N'T> (cf. 2,4,3 / 2,4,4).
Phrase((2,4,6), "raceEthnicSelect", "dont see thugs", r"""(?V1)<(?i)DO> <(?i)N'T> <(?i)DATE|SEE|SERVICE|SERVE> <(?i)%s>""" % (v('thugs')), 1)
### TBD
## SECTION 3: Limited duration: New arrival, limited time, etc.
## Rules flagging short-stay / just-visiting language.
# new in town
Phrase((3,1,1), "briefDuration", "new in town", r"""(?V1)<(?i)%s> <(?i)IN|N|TO> <(?i)THE|THIS>? <(?i)%s>""" % (v('new'), v('town')), 1)
# [for a] limited time
Phrase((3,2,1), "briefDuration", "limited time", r"""(?V1)<(?i)%s> <(?i)%s>""" % (v('limited'), v('time')), 1)
# (for a) few days/nights, few more days, couple
Phrase((3,3,1), "briefDuration", "few days", r"""(?V1)<(?i)%s> <(?i)MORE>? <(?i)%s>""" % (v('few'), v('days')), 1)
# (for a) few exciting days -- looser variant, hence the lower weight
Phrase((3,3,2), "briefDuration", "few * days", r"""(?V1)<(?i)%s> <(?i)%s>""" % (v('few'), v('days')), 0.5)
# (for) two days only
Phrase((3,4,1), "briefDuration", "two days only", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)ONLY>""" % (v('num'), v('days')), 1)
# 2 more days
Phrase((3,4,2), "briefDuration", "two more days", r"""(?V1)<(?i)%s> <(?i)MORE> <(?i)%s>""" % (v('num'), v('days')), 1)
# visiting
Phrase((3,5,1), "briefDuration", "visiting", r"""(?V1)<(?i)VISITING>""", 1)
# SECTION 4: customer age selection
# Rules flagging ads that screen clients by age ("older men only" etc.).
# older men
Phrase((4,1,1), "ageSelect", "older men only", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)ONLY>""" % (v('older'), v('man', 'men')), 1.0)
Phrase((4,1,2), "ageSelect", "only older men", r"""(?V1)<(?i)ONLY> <(?i)%s> <(?i)%s>""" % (v('older'), v('man', 'men')), 1.0)
# older, generous men -- wildcard gap, slightly weaker signal
Phrase((4,1,3), "ageSelect", "older x, y men", r"""(?V1)<(?i)%s> <(?i).*> <(?i),>? <(?i).*> <(?i)%s>""" % (v('older'), v('man', 'men')), 0.8)
# men 35 or older
Phrase((4,1,4), "ageSelect", "man x,y, older", r"""(?V1)<(?i)%s> <(?i).*> <(?i).*> <(?i)%s>""" % (v('man', 'men'), v('older')), 0.8)
# prefer/love older men
Phrase((4,2,1), "ageSelect", "prefer older men", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)%s>""" % (v('prefer'), v('older'), v('man', 'men')), 1)
Phrase((4,2,2), "ageSelect", "prefer older x y men", r"""(?V1)<(?i)%s> <(?i)%s> <(?i).*>{0,2} <(?i)%s>""" % (v('prefer'), v('older'), v('man', 'men')), 1)
Phrase((4,2,3), "ageSelect", "prefer older x,y men", r"""(?V1)<(?i)%s> <(?i)%s> <(?i).*> <(?i),> <(?i).*> <(?i)%s>""" % (v('prefer'), v('older'), v('man', 'men')), 1)
Phrase((4,2,4), "ageSelect", "older men preferred", r"""(?V1)<(?i)%s> <(?i)%s> <(?i)%s>""" % (v('older'), v('man', 'men'), v('preferred')), 1)
# SECTION 5: provider youthfulness
# young
Phrase((5,1,1), "providerYouth", "young", r"""(?V1)<(?i)YOUNG>""", 0.8)
# tight
Phrase((5,2,1), "providerYouth", "tight", r"""(?V1)<(?i)TIGHT|TITE>""", 0.8)
# new: ideally, this matches only if new does not match new in town etc above
Phrase((5,3,1), "providerYouth", "new", r"""(?V1)<(?i)%s>""" % v('new'), 0.5)
# SECTION 6: incall
Phrase((6,1,1), "incall", "incall", r"""(?V1)<(?i)%s>""" % v('incall'), 1)
# in call , in-call
Phrase((6,1,2), "incall", "in call", r"""(?V1)<(?i)IN> <(?i)->? <(?i)%s>""" % v('call'), 1)
Phrase((6,2,1), "incall", "my place", r"""(?V1)<(?i)MY> <(?i)%s>""" % v('place'), 1)
# hosting (should it be hosting in/at/out of?)
Phrase((6,3,1), "incall", "hosting", r"""(?V1)<(?i)HOSTING>""", 1)
Phrase((6,4,1), "incall", "safe location", r"""(?V1)<(?i)%s>""" % v('safe'), 1)
Phrase((6,4,2), "incall", "discreet location", r"""(?V1)<(?i)%s>""" % v('discreet'), 1)
# SECTION 7: NOTINCALL
Phrase((7,1,1), "notincall", "not", r"""(?V1)<(?i)NO|NOT|N0> <(?i)%s>""" % v('incall'), 1)
# SECTION 8: outcall
Phrase((8,1,1), "outcall", "outcall", r"""(?V1)<(?i)%s>""" % v('outcall'), 1)
# out call, out-call
Phrase((8,1,2), "outcall", "out-call", r"""(?V1)<(?i)OUT> <(?i)-> <(?i)%s>""" % v('call'), 1)
# your place
Phrase((8,2,1), "outcall", "your place", r"""(?V1)<(?i)%s> <(?i)%s>""" % (v('your'), v('place')), 1)
# you're place
Phrase((8,2,2), "outcall", "you're place", r"""(?V1)<(?i)YOU> <(?i)'RE> <(?i)%s>""" % v('place'), 1)
# hotel friendly? hotel/motel
# SECTION 9: NOTOUTCALL
Phrase((9,1,1), "notoutcall", "not", r"""(?V1)<(?i)NO|NOT|N0> <(?i)%s>""" % v('outcall'), 1)
# SECTION 10: incall + outcall both
# single token
Phrase((10,1,1), "incalloutcall", "incall/outcall", r"""(?V1)<(?i)(?:%s)[-/*_]+(?:%s)>""" % (v('incall', 'in'), v('outcall', 'out')), 1)
# two tokens
Phrase((10,2,1), "incalloutcall", "in call/outcall", r"""(?V1)<(?i)IN> <(?i)(?:%s)[-/*_]+(?:%s)>""" % (v('call'), v('outcall', 'out')), 1)
# three tokens
Phrase((10,3,1), "incalloutcall", "incall / outcall", r"""(?V1)<(?i)%s> <(?i)%s>? <(?i)%s>""" % (v('incall', 'in'), v('andor'), v('outcall', 'out')), 1)
# SECTION 13: names
# needs case sensitivity
Phrase((13,1,1), "names", "my name is", r"""(?V1)<(?i)THE|MY> <(?i)NAME> <(?i)IS|'S> <(?-i)[A-Z][a-zA-Z-]*>""", 1)
Phrase((13,1,2), "names", "my names", r"""(?V1)<(?i)THE|MY> <(?i)NAMES> <(?-i)[A-Z][a-zA-Z-]*>""", 1)
Phrase((13,2,1), "names", "hi fellas i am", r"""(?V1)<(?i)HELLO|HI|HEY|HOWDY>? <(?i)%s>? <(?i)%s>? <(?i)I> <(?i)AM|'M> <(?-i)[A-Z][a-zA-Z-]*>""" % (v('honey', 'men'), v('punctuation')), 1)
Phrase((13,2,2), "names", "hi fellas im", r"""(?V1)<(?i)HELLO|HI|HEY|HOWDY>? <(?i)%s>? <(?i)%s>? <(?i)IM|IAM> <(?-i)[A-Z][a-zA-Z-]*>""" % (v('honey', 'men'), v('punctuation')), 1)
Phrase((13,3,1), "names", "ask for", r"""(?V1)<(?i)ASK> <(?i)FOR> <(?-i)[A-Z][a-zA-Z-]*>""", 1)
Phrase((13,3,2), "names", "try me", r"""(?V1)<(?i)TRY> <(?i)ME> <(?-i)[A-Z][a-zA-Z-]*>""", 1)
Phrase((13,3,4), "names", "call now", r"""(?V1)<(?i)CALL> <(?i)NOW|ME> <(?i)%s>? <(?-i)[A-Z][a-zA-Z-]*>""" % v('punctuation'), 1)
Phrase((13,4,1), "names", "yours", r"""(?V1)<(?i)YOURS|YERS> <(?i)TRULY|TRUELY>? <(?i)%s>? <(?-i)[A-Z][a-zA-Z-]*>""" % v('punctuation'), 1)
Phrase((13,4,2), "names", "xoxo", r"""(?V1)<(?i)XOXO> <(?i)%s>? <(?-i)[A-Z][a-zA-Z-]*>""" % v('punctuation'), 1)
Phrase((13,4,3), "names", "kisses", r"""(?V1)<(?i)KISSES> <(?i)%s>? <(?-i)[A-Z][a-zA-Z-]*>""" % v('punctuation'), 1)
Phrase((13,4,4), "names", "mwah", r"""(?V1)<(?i)MWAH|MUAH> <(?i)%s>? <(?-i)[A-Z][a-zA-Z-]*>""" % v('punctuation'), 1)
# SECTION 15: race/ethnic
Phrase((15,1,1), "ethnicityNationality", "asian", r"""(?V1)<(?i)%s>""" % v('asian'), 1)
Phrase((15,1,2), "ethnicityNationality", "asia", r"""(?V1)<(?i)%s>""" % v('asia'), 1)
Phrase((15,1,3), "ethnicityNationality", "latina", r"""(?V1)<(?i)%s>""" % v('latina'), 1)
# SECTION 21: spa
Phrase((21,1,1), "spa", "spa", r"""(?V1)<(?i)%s>""" % v('spa'), 1)
Phrase((21,1,2), "spa", "therapy", r"""(?V1)<(?i)%s>""" % v('therapy'), 0.5)
Phrase((21,1,3), "spa", "massage", r"""(?V1)<(?i)%s> (?V1)<(?i)%s>""" % (v('shiatsu'), v('massage')), 1)
# SECTION 31: agency
Phrase((31,1,1), "agency", "agency", r"""(?V1)<(?i)%s>""" % v('agency'), 1)
# SECTION 32: not an agency
# no agency, not an agency
# not with an agency, not with any agency
Phrase((32,1,1), "notagency", "not", r"""(?V1)<(?i)NO|NOT|N0> <.*>{0,2} <(?i)%s>""" % v('agency'), 1)
# not affiliated|associated with any|an agency
# [do not work] for an agency
Phrase((32,1,2), "notagency", "affiliated", r"""(?V1)<(?i)%s> <(?i)%s> <.*>? <(?i)%s>""" % (v('affiliated'), v('with'), v('agency')), 1)
# law enforcement[s] enforcment inforcement agency
Phrase((32,1,3), "notagency", "enforcement", r"""(?V1)<(?i)%s> <(?i)%s>""" % (v('enforcement'), v('agency')), 1)
# SECTION 91: superbowl
Phrase((91,1,1), "superbowl", "superbowl", r"""(?V1)<(?i)SUPERBOWL>""", 1)
Phrase((91,1,2), "superbowl", "super bowl", r"""(?V1)<(?i)SUPER> (?V1)<(?i)BOWL>""", 1)
Phrase((91,2,1), "superbowl", "meadowlands", r"""(?V1)<(?i)MEADOWLANDS>""", 1)
Phrase((91,2,2), "superbowl", "meadow lands", r"""(?V1)<(?i)MEADOW> (?V1)<(?i)LANDS>""", 1)
# SECTION 92: NBA
Phrase((92,1,1), "NBA", "NBA", r"""(?V1)<(?i)NBA>""", 1)
Phrase((92,2,1), "NBA", "allstar", r"""(?V1)<(?i)allstar>""", 1)
Phrase((92,2,2), "NBA", "all star", r"""(?V1)<(?i)ALL> <->? (?V1)<(?i)STAR>""", 1)
def main(argv=None):
    """Command-line entry point: scan a text or HTML file with the rule table.

    Args:
        argv: optional argument list (defaults to sys.argv[1:] when None).
            Previously this parameter was accepted but silently ignored,
            making the function impossible to drive programmatically.

    Returns:
        None on success, so ``sys.exit(main())`` exits with status 0.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('-c', '--category', required=False,
                        help='major category of rules to apply (default: use all)')
    parser.add_argument('-f', '--family', required=False,
                        help='minor category of rules to apply (default: use all)')
    parser.add_argument('-i', '--indicator', required=False,
                        help='Indicate precise rule as X.Y.Z')
    parser.add_argument('-t', '--type', required=False, default='text',
                        help='input file type', choices=('text', 'html'))
    parser.add_argument('-v', '--verbose', required=False, help='verbose',
                        action='store_true')
    # Bug fix: honor the argv parameter instead of always reading sys.argv.
    args = parser.parse_args(argv)
    with codecs.open(args.input, 'r', encoding='utf-8') as f:
        text = f.read()
    if args.type == 'html':
        # Imported lazily so plain-text scans do not require the HTML extractor.
        from pymod.htmlextract import extract_text
        text = extract_text(text)
    tok = Tokenizer(text)
    tokens = [t for t in tok.genTokens()]
    result = patternScan(tokens, category=args.category,
                         family=args.family, indicator=args.indicator)
    # print(x) with a single argument behaves identically under Python 2 and 3,
    # unlike the former Python-2-only "print >> sys.stdout, x" statement.
    print(json.dumps(result, indent=4))
# call main() if this is run as standalone
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status
    # (None maps to 0) — the conventional CLI entry-point pattern.
    sys.exit(main())
# End of patscan.py
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import Queue as queue
import threading
from apache_beam.coders import coder_impl
from apache_beam.runners.api import beam_fn_api_pb2
import grpc
# This module is experimental. No backwards-compatibility guarantees.
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
    """An output stream for CoderImpls that invokes a callback on close().

    The optional close_callback receives the full accumulated byte payload
    (``self.get()``) exactly once, when close() is called.
    """
    def __init__(self, close_callback=None):
        super(ClosableOutputStream, self).__init__()
        self._close_callback = close_callback
    def close(self):
        # Hand the buffered bytes to the callback, if one was supplied.
        callback = self._close_callback
        if callback:
            callback(self.get())
class DataChannel(object):
    """Represents a channel for reading and writing data over the data plane.

    Read from this channel with the input_elements method::

        for elements_data in data_channel.input_elements(instruction_id, targets):
            [process elements_data]

    Write to this channel using the output_stream method::

        out1 = data_channel.output_stream(instruction_id, target1)
        out1.write(...)
        out1.close()

    When all data for all instructions is written, close the channel::

        data_channel.close()
    """
    # Python 2 style abstract-base-class declaration.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def input_elements(self, instruction_id, expected_targets):
        """Returns an iterable of all Element.Data bundles for instruction_id.

        This iterable terminates only once the full set of data has been
        received for each of the expected targets. It may block waiting for
        more data.

        Args:
            instruction_id: which instruction the results must belong to
            expected_targets: which targets to wait on for completion
        """
        raise NotImplementedError(type(self))
    @abc.abstractmethod
    def output_stream(self, instruction_id, target):
        """Returns an output stream writing elements to target.

        Args:
            instruction_id: which instruction this stream belongs to
            target: the target of the returned stream
        """
        raise NotImplementedError(type(self))
    @abc.abstractmethod
    def close(self):
        """Closes this channel, indicating that all data has been written.

        Data can continue to be read.

        If this channel is shared by many instructions, should only be called
        on worker shutdown.
        """
        raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
    """An in-memory implementation of a DataChannel.

    The channel has two sides: whatever is written to one side becomes
    readable on the other.  inverse() returns the opposite side of an
    instance.
    """
    def __init__(self, inverse=None):
        self._inputs = []
        if not inverse:
            # No partner supplied: create the opposite side, linked back here.
            inverse = InMemoryDataChannel(self)
        self._inverse = inverse
    def inverse(self):
        """Returns the other side of this channel."""
        return self._inverse
    def input_elements(self, instruction_id, unused_expected_targets=None):
        # Replay only the bundles tagged with the requested instruction.
        for element in self._inputs:
            if element.instruction_reference == instruction_id:
                yield element
    def output_stream(self, instruction_id, target):
        def push_to_inverse(data):
            # pylint: disable=protected-access
            self._inverse._inputs.append(
                beam_fn_api_pb2.Elements.Data(
                    instruction_reference=instruction_id,
                    target=target,
                    data=data))
        return ClosableOutputStream(push_to_inverse)
    def close(self):
        # Nothing to release for the in-memory implementation.
        pass
class _GrpcDataChannel(DataChannel):
    """Base class for implementing a BeamFnData-based DataChannel."""
    # Sentinel enqueued on the send queue to mark that no more writes follow.
    _WRITES_FINISHED = object()
    def __init__(self):
        # Outbound Elements.Data messages awaiting transmission.
        self._to_send = queue.Queue()
        # Maps instruction_id -> queue of received Elements.Data bundles.
        self._received = collections.defaultdict(queue.Queue)
        self._receive_lock = threading.Lock()
        # Set once the inbound stream has been fully consumed (or failed).
        self._reads_finished = threading.Event()
    def close(self):
        # Signals _write_outputs() to stop after draining already-queued data.
        self._to_send.put(self._WRITES_FINISHED)
    def wait(self, timeout=None):
        # Blocks until the reader thread finishes consuming inbound elements.
        self._reads_finished.wait(timeout)
    def _receiving_queue(self, instruction_id):
        # Locked because the defaultdict may be populated concurrently by the
        # reader thread and by consumers calling input_elements().
        with self._receive_lock:
            return self._received[instruction_id]
    def input_elements(self, instruction_id, expected_targets):
        """Yields received data for instruction_id until all targets finish.

        An empty data payload is treated as the end-of-stream marker for its
        target; iteration ends once every expected target has delivered one.
        May block on the underlying queue while waiting for more data.
        """
        received = self._receiving_queue(instruction_id)
        done_targets = []
        while len(done_targets) < len(expected_targets):
            data = received.get()
            if not data.data and data.target in expected_targets:
                # Empty payload == end-of-stream for this target.
                done_targets.append(data.target)
            else:
                # A target must not produce data after its end-of-stream marker.
                assert data.target not in done_targets
                yield data
    def output_stream(self, instruction_id, target):
        """Returns a ClosableOutputStream whose close() enqueues the data."""
        def add_to_send_queue(data):
            self._to_send.put(
                beam_fn_api_pb2.Elements.Data(
                    instruction_reference=instruction_id,
                    target=target,
                    data=data))
            # Empty payload marks end-of-stream for this target, matching the
            # convention consumed by input_elements() above.
            self._to_send.put(
                beam_fn_api_pb2.Elements.Data(
                    instruction_reference=instruction_id,
                    target=target,
                    data=''))
        return ClosableOutputStream(add_to_send_queue)
    def _write_outputs(self):
        # Generator draining the send queue into Elements messages until the
        # _WRITES_FINISHED sentinel is seen.
        done = False
        while not done:
            # Block for at least one item, then opportunistically batch more.
            data = [self._to_send.get()]
            try:
                # Coalesce up to 100 other items.
                for _ in range(100):
                    data.append(self._to_send.get_nowait())
            except queue.Empty:
                pass
            if data[-1] is self._WRITES_FINISHED:
                # The sentinel is always last in the batch: drop it and stop.
                done = True
                data.pop()
            if data:
                yield beam_fn_api_pb2.Elements(data=data)
    def _read_inputs(self, elements_iterator):
        # Runs on the reader thread: routes every inbound bundle to the
        # per-instruction receiving queue.
        # TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
        try:
            for elements in elements_iterator:
                for data in elements.data:
                    self._receiving_queue(data.instruction_reference).put(data)
        except: # pylint: disable=broad-except
            logging.exception('Failed to read inputs in the data plane')
            raise
        finally:
            # Wake any wait() callers whether we finished cleanly or not.
            self._reads_finished.set()
    def _start_reader(self, elements_iterator):
        # Consumes elements_iterator on a background daemon thread so reads
        # never block the caller.
        reader = threading.Thread(
            target=lambda: self._read_inputs(elements_iterator),
            name='read_grpc_client_inputs')
        reader.daemon = True
        reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
    """A DataChannel wrapping the client side of a BeamFnData connection."""
    def __init__(self, data_stub):
        super(GrpcClientDataChannel, self).__init__()
        # Feed our outbound queue into the stub's bidirectional Data call and
        # consume its response stream on a background reader thread.
        outbound = self._write_outputs()
        self._start_reader(data_stub.Data(outbound))
class GrpcServerDataChannel(
        beam_fn_api_pb2.BeamFnDataServicer, _GrpcDataChannel):
    """A DataChannel wrapping the server side of a BeamFnData connection."""
    def Data(self, elements_iterator, context):
        # This method is deliberately a generator: the reader thread is only
        # started once gRPC begins iterating the response stream.
        self._start_reader(elements_iterator)
        for outbound in self._write_outputs():
            yield outbound
class DataChannelFactory(object):
    """An abstract factory for creating ``DataChannel``."""
    # Python 2 style abstract-base-class declaration.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def create_data_channel(self, function_spec):
        """Returns a ``DataChannel`` from the given function_spec."""
        raise NotImplementedError(type(self))
    @abc.abstractmethod
    def close(self):
        """Close all channels that this factory owns."""
        raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
    """A factory for ``GrpcClientDataChannel``.

    Caches the created channels by ``data descriptor url``.
    """
    def __init__(self):
        # url -> GrpcClientDataChannel
        self._data_channel_cache = {}
    def create_data_channel(self, function_spec):
        """Returns the (possibly cached) channel for function_spec's url."""
        remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort()
        function_spec.data.Unpack(remote_grpc_port)
        url = remote_grpc_port.api_service_descriptor.url
        channel = self._data_channel_cache.get(url)
        if channel is None:
            logging.info('Creating channel for %s', url)
            channel = GrpcClientDataChannel(
                beam_fn_api_pb2.BeamFnDataStub(grpc.insecure_channel(url)))
            self._data_channel_cache[url] = channel
        return channel
    def close(self):
        """Closes every cached channel and empties the cache."""
        logging.info('Closing all cached grpc data channels.')
        for channel in self._data_channel_cache.values():
            channel.close()
        self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
    """A singleton factory for ``InMemoryDataChannel``."""
    def __init__(self, in_memory_data_channel):
        # The one shared channel handed out for every function spec.
        self._channel = in_memory_data_channel
    def create_data_channel(self, unused_function_spec):
        return self._channel
    def close(self):
        # The shared in-memory channel has nothing to release.
        pass
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Intentionally a no-op: this migration applies no schema changes
        # (only the frozen ORM state recorded in ``models`` below).
        pass
    def backwards(self, orm):
        # Intentionally a no-op: nothing to undo since forwards() changes
        # no schema.
        pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SpineNet-MBConv model.
SpineNet with MBConv blocks.
X. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization
https://arxiv.org/abs/1912.05027
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
from ops import spatial_transform_ops
# Expansion ratio applied to every MBConv block in the network.
DEFAULT_EXPAND_RATIO = 6

# Base number of filters at each block level (key: level, value: filters).
# These values are multiplied by `filter_size_scale` before use.
FILTER_SIZE_MAP = {
    0: 8,
    1: 16,
    2: 24,
    3: 40,
    4: 80,
    5: 112,
    6: 112,
    7: 112,
}

# The static SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
#   (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
    (2, 'mbconv', (0, 1), False),
    (2, 'mbconv', (1, 2), False),
    (4, 'mbconv', (1, 2), False),
    (3, 'mbconv', (3, 4), False),
    (4, 'mbconv', (3, 5), False),
    (6, 'mbconv', (4, 6), False),
    (4, 'mbconv', (4, 6), False),
    (5, 'mbconv', (7, 8), False),
    (7, 'mbconv', (7, 9), False),
    (5, 'mbconv', (9, 10), False),
    (5, 'mbconv', (9, 11), False),
    (4, 'mbconv', (6, 11), True),
    (3, 'mbconv', (5, 11), True),
    (5, 'mbconv', (8, 13), True),
    (7, 'mbconv', (6, 15), True),
    (6, 'mbconv', (13, 15), True),
]

# Model-id -> scaling parameters for the SpineNet-MBConv family:
# endpoint feature dimension, uniform filter scaling, and block repeats.
SCALING_MAP = {
    '49': {
        'endpoints_num_filters': 48,
        'filter_size_scale': 1.0,
        'block_repeats': 1,
    },
    '49S': {
        'endpoints_num_filters': 40,
        'filter_size_scale': 0.65,
        'block_repeats': 1,
    },
    '49XS': {
        'endpoints_num_filters': 24,
        'filter_size_scale': 0.6,
        'block_repeats': 1,
    },
}
class BlockSpec(object):
  """Configuration record for a single SpineNet building block.

  Attributes:
    level: `int` feature level of the block.
    block_fn: `str` name of the block function (e.g. 'mbconv').
    input_offsets: pair of `int` offsets selecting the two parent blocks.
    is_output: `bool` whether the block feeds the output endpoints.
  """

  def __init__(self, level, block_fn, input_offsets, is_output):
    # Plain value holder: store the spec fields verbatim.
    self.level, self.block_fn = level, block_fn
    self.input_offsets, self.is_output = input_offsets, is_output
def build_block_specs(block_specs=None):
  """Converts raw block spec tuples into a list of BlockSpec objects.

  Args:
    block_specs: optional list of raw spec tuples. When empty or None, the
      NAS-discovered SPINENET_BLOCK_SPECS topology is used.

  Returns:
    A list of BlockSpec instances, one per raw spec.
  """
  specs = block_specs or SPINENET_BLOCK_SPECS
  logging.info('Building SpineNet block specs: %s', specs)
  return [BlockSpec(*spec) for spec in specs]
def block_group(inputs,
                in_filters,
                out_filters,
                strides,
                expand_ratio,
                block_repeats,
                se_ratio=0.2,
                batch_norm_activation=nn_ops.BatchNormActivation(),
                dropblock=nn_ops.Dropblock(),
                drop_connect_rate=None,
                data_format='channels_last',
                name=None,
                is_training=False):
  """Creates one group of MBConv blocks for Mobile SpineNet.

  The first block absorbs `strides` and the in->out filter transition;
  any remaining repeats keep resolution and feature width unchanged.
  """
  # Keyword arguments shared by every block in the group.
  shared_kwargs = dict(
      se_ratio=se_ratio,
      batch_norm_activation=batch_norm_activation,
      dropblock=dropblock,
      drop_connect_rate=drop_connect_rate,
      data_format=data_format,
      is_training=is_training)

  # Apply strides only to the first block in block_group.
  net = nn_blocks.mbconv_block(inputs, in_filters, out_filters, expand_ratio,
                               strides, **shared_kwargs)
  # Remaining repeats use stride 1 and keep the output filter count.
  for _ in range(block_repeats - 1):
    net = nn_blocks.mbconv_block(net, out_filters, out_filters, expand_ratio,
                                 1, **shared_kwargs)
  return tf.identity(net, name)
def resample_with_sepconv(feat,
                          target_width,
                          target_num_filters,
                          use_native_resize_op=False,
                          batch_norm_activation=nn_ops.BatchNormActivation(),
                          data_format='channels_last',
                          name=None,
                          is_training=False):
  """Matches resolution and feature dimension to the target block.

  Args:
    feat: a 4-D NHWC feature tensor with a known static shape.
    target_width: `int` spatial width the output should have.
    target_num_filters: `int` feature dimension the output should have.
    use_native_resize_op: whether to use tf.image.resize_nearest_neighbor
      instead of the broadcast-based upsampling implementation.
    batch_norm_activation: an operation that includes a batch normalization
      layer followed by an optional activation layer.
    data_format: "channels_last" or "channels_first".
    name: `str` suffix used for the variable scope.
    is_training: `bool` whether the model is in training mode.

  Returns:
    The resampled feature tensor with spatial width `target_width` and
    `target_num_filters` channels.

  Raises:
    ValueError: if the static shape is unknown, or the current and target
      widths are not related by an integer factor.
  """
  _, height, width, num_filters = feat.get_shape().as_list()
  if width is None or num_filters is None:
    raise ValueError('Shape of feat is None (shape:{}).'.format(feat.shape))

  with tf.variable_scope('resample_with_sepconv_{}'.format(name)):
    # Down-sample with repeated stride-2 separable convolutions.
    if width > target_width:
      if width % target_width != 0:
        raise ValueError('width ({}) is not divisible by '
                         'target_width ({}).'.format(width, target_width))
      while width > target_width:
        feat = nn_ops.depthwise_conv2d_fixed_padding(
            inputs=feat, kernel_size=3, strides=2, data_format=data_format)
        feat = batch_norm_activation(feat, is_training=is_training)
        # BUG FIX: this file enables true division via
        # `from __future__ import division`, so `width /= 2` produced a
        # float; use floor division to keep `width` an integer.
        width //= 2
    # Up-sample with NN interpolation.
    elif width < target_width:
      if target_width % width != 0:
        # BUG FIX: corrected 'target_wdith' typo in the error message.
        raise ValueError('target_width ({}) is not divisible by '
                         'width ({}).'.format(target_width, width))
      scale = target_width // width
      if use_native_resize_op:
        feat = tf.image.resize_nearest_neighbor(feat,
                                                [height * scale, width * scale])
      else:
        feat = spatial_transform_ops.nearest_upsampling(feat, scale=scale)

    # Match feature dimension to the target block with a 1x1 convolution.
    feat = nn_ops.conv2d_fixed_padding(
        inputs=feat,
        filters=target_num_filters,
        kernel_size=1,
        strides=1,
        data_format=data_format)
    feat = batch_norm_activation(feat, relu=False, is_training=is_training)
  return feat
def get_drop_connect_rate(init_rate, i, n):
  """Returns the drop connect rate for the ith of n blocks.

  The rate grows linearly with block depth, from init_rate / n for the
  first block up to init_rate for the last. Returns None when init_rate
  is unset or outside the open interval (0, 1).
  """
  # Guard clause: disabled or out-of-range initial rates mean no drop connect.
  if init_rate is None or not 0 < init_rate < 1:
    return None
  rate = init_rate * float(i + 1) / n
  logging.info('Drop connect rate %f for block_%d.', rate, i)
  return rate
class SpineNetMBConv(object):
  """Class to build SpineNet family models with MBConv blocks."""

  def __init__(self,
               min_level=3,
               max_level=7,
               block_specs=None,
               endpoints_num_filters=48,
               use_native_resize_op=False,
               se_ratio=0.2,
               block_repeats=1,
               filter_size_scale=1.0,
               activation='swish',
               batch_norm_activation=nn_ops.BatchNormActivation(
                   activation='swish'),
               init_drop_connect_rate=None,
               data_format='channels_last'):
    """SpineNetMBConv initialization function.

    Args:
      min_level: `int` minimum level in SpineNet endpoints.
      max_level: `int` maximum level in SpineNet endpoints.
      block_specs: a list of BlockSpec objects that specifies the SpineNet
        network topology. By default (None), the previously discovered
        architecture is used.
      endpoints_num_filters: `int` feature dimension applied to endpoints before
        sharing conv layers in head.
      use_native_resize_op: Whether to use native
        tf.image.nearest_neighbor_resize or the broadcast implmentation to do
        upsampling.
      se_ratio: squeeze and excitation ratio for MBConv blocks.
      block_repeats: `int` number of repeats per block.
      filter_size_scale: `float` a scaling factor to uniformaly scale feature
        dimension in SpineNet.
      activation: the activation function after cross-scale feature fusion.
        Support 'relu' and 'swish'.
      batch_norm_activation: An operation that includes a batch normalization
        layer followed by an optional activation layer.
      init_drop_connect_rate: `float` initial drop connect rate.
      data_format: An optional string from: "channels_last", "channels_first".
        Defaults to "channels_last".

    Raises:
      ValueError: if `activation` is neither 'relu' nor 'swish'.
    """
    self._min_level = min_level
    self._max_level = max_level
    # BUG FIX: the default was `block_specs=build_block_specs()`, which is
    # evaluated once at import time and shares one list across all instances.
    # Resolve the default lazily instead (backward-compatible).
    self._block_specs = (
        block_specs if block_specs is not None else build_block_specs())
    self._endpoints_num_filters = endpoints_num_filters
    self._use_native_resize_op = use_native_resize_op
    self._se_ratio = se_ratio
    self._block_repeats = block_repeats
    self._filter_size_scale = filter_size_scale
    if activation == 'relu':
      self._activation = tf.nn.relu
    elif activation == 'swish':
      self._activation = tf.nn.swish
    else:
      raise ValueError('Activation {} not implemented.'.format(activation))
    self._batch_norm_activation = batch_norm_activation
    self._init_dc_rate = init_drop_connect_rate
    self._data_format = data_format
    self._dropblock = nn_ops.Dropblock()

  def _build_stem_network(self, inputs, is_training):
    """Build the stem network."""
    # Build the first conv layer.
    inputs = nn_ops.conv2d_fixed_padding(
        inputs=inputs,
        filters=int(FILTER_SIZE_MAP[0] * self._filter_size_scale),
        kernel_size=3,
        strides=2,
        data_format=self._data_format)
    inputs = tf.identity(inputs, 'initial_conv')
    inputs = self._batch_norm_activation(inputs, is_training=is_training)
    # Build the initial L1 block and L2 block.
    base0 = block_group(
        inputs=inputs,
        in_filters=int(FILTER_SIZE_MAP[0] * self._filter_size_scale),
        out_filters=int(FILTER_SIZE_MAP[1] * self._filter_size_scale),
        expand_ratio=DEFAULT_EXPAND_RATIO,
        block_repeats=self._block_repeats,
        strides=1,
        se_ratio=self._se_ratio,
        batch_norm_activation=self._batch_norm_activation,
        dropblock=self._dropblock,
        data_format=self._data_format,
        name='stem_block_0',
        is_training=is_training)
    base1 = block_group(
        inputs=base0,
        in_filters=int(FILTER_SIZE_MAP[1] * self._filter_size_scale),
        out_filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
        expand_ratio=DEFAULT_EXPAND_RATIO,
        block_repeats=self._block_repeats,
        strides=2,
        se_ratio=self._se_ratio,
        batch_norm_activation=self._batch_norm_activation,
        dropblock=self._dropblock,
        data_format=self._data_format,
        name='stem_block_1',
        is_training=is_training)
    # The two stem blocks seed the scale-permuted network.
    return [base0, base1]

  def _build_endpoints(self, features, is_training):
    """Match filter size for endpoints before sharing conv layers."""
    endpoints = {}
    for level in range(self._min_level, self._max_level + 1):
      # 1x1 conv projects each level to a common feature dimension.
      feature = nn_ops.conv2d_fixed_padding(
          inputs=features[level],
          filters=self._endpoints_num_filters,
          kernel_size=1,
          strides=1,
          data_format=self._data_format)
      feature = self._batch_norm_activation(feature, is_training=is_training)
      endpoints[level] = feature
    return endpoints

  def _build_scale_permuted_network(self, feats, input_width, is_training):
    """Builds the scale permuted network from a given config."""
    # Number of output connections from each feat.
    num_outgoing_connections = [0] * len(feats)
    output_feats = {}
    for i, block_spec in enumerate(self._block_specs):
      with tf.variable_scope('sub_policy{}'.format(i)):
        # Find feature map size, filter size, and block fn for the target block.
        target_width = int(math.ceil(input_width / 2 ** block_spec.level))
        target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
                                 self._filter_size_scale)

        def _input_ind(input_offset):
          # Validate that the parent offset refers to an existing block.
          if input_offset < len(feats):
            return input_offset
          else:
            raise ValueError(
                'input_offset ({}) is out of existing blocks({})'.format(
                    input_offset, len(feats)))

        # Resample and merge two parent blocks.
        input0 = _input_ind(block_spec.input_offsets[0])
        input1 = _input_ind(block_spec.input_offsets[1])
        parent0_feat = feats[input0]
        parent0_feat = resample_with_sepconv(
            parent0_feat,
            target_width,
            target_num_filters,
            use_native_resize_op=self._use_native_resize_op,
            batch_norm_activation=self._batch_norm_activation,
            data_format=self._data_format,
            name='resample_{}_0'.format(i),
            is_training=is_training)
        num_outgoing_connections[input0] += 1
        parent1_feat = feats[input1]
        parent1_feat = resample_with_sepconv(
            parent1_feat,
            target_width,
            target_num_filters,
            use_native_resize_op=self._use_native_resize_op,
            batch_norm_activation=self._batch_norm_activation,
            data_format=self._data_format,
            name='resample_{}_1'.format(i),
            is_training=is_training)
        num_outgoing_connections[input1] += 1
        # Sum parent0 and parent1 to create the target feat.
        target_feat = parent0_feat + parent1_feat

        # Connect intermediate blocks with outdegree 0 to the output block.
        if block_spec.is_output:
          for j, (j_feat, j_connections) in enumerate(
              zip(feats, num_outgoing_connections)):
            # Only fuse feats that match the target's width and channels.
            if j_connections == 0 and (
                j_feat.shape[2] == target_width and
                j_feat.shape[3] == target_feat.shape[3]):
              target_feat += j_feat
              num_outgoing_connections[j] += 1

        with tf.variable_scope('scale_permuted_block_{}'.format(len(feats))):
          target_feat = self._activation(target_feat)
          # Build the target block.
          target_feat = block_group(
              inputs=target_feat,
              in_filters=target_num_filters,
              out_filters=target_num_filters,
              expand_ratio=DEFAULT_EXPAND_RATIO,
              block_repeats=self._block_repeats,
              strides=1,
              se_ratio=self._se_ratio,
              batch_norm_activation=self._batch_norm_activation,
              drop_connect_rate=get_drop_connect_rate(self._init_dc_rate, i,
                                                      len(self._block_specs)),
              dropblock=self._dropblock,
              data_format=self._data_format,
              name='scale_permuted_block_{}'.format(i),
              is_training=is_training)

        feats.append(target_feat)
        num_outgoing_connections.append(0)
        # Save output feats.
        if block_spec.is_output:
          if block_spec.level in output_feats:
            raise ValueError(
                'Duplicate feats found for output level {}.'.format(
                    block_spec.level))
          if (block_spec.level < self._min_level or
              block_spec.level > self._max_level):
            raise ValueError('Output level is out of range [{}, {}]'.format(
                self._min_level, self._max_level))
          output_feats[block_spec.level] = target_feat
    return output_feats

  def __call__(self, images, is_training=False):
    """Generate a multiscale feature pyramid.

    Args:
      images: The input image tensor.
      is_training: `bool` if True, the model is in training mode.

    Returns:
      a `dict` containing `int` keys for continuous feature levels
      [min_level, min_level + 1, ..., max_level]. The values are corresponding
      features with shape [batch_size, height_l, width_l,
      endpoints_num_filters].
    """
    _, _, in_width, _ = images.get_shape().as_list()
    with tf.variable_scope('spinenet_mbconv'):
      feats = self._build_stem_network(images, is_training)
      feats = self._build_scale_permuted_network(feats, in_width, is_training)
      endpoints = self._build_endpoints(feats, is_training)
    return endpoints
def spinenet_mbconv_builder(model_id,
                            min_level=3,
                            max_level=7,
                            block_specs=None,
                            use_native_resize_op=False,
                            se_ratio=0.2,
                            activation='swish',
                            batch_norm_activation=nn_ops.BatchNormActivation(
                                activation='swish'),
                            init_drop_connect_rate=None,
                            data_format='channels_last'):
  """Builds the SpineNet-MBConv network.

  Args:
    model_id: `str` key into SCALING_MAP selecting the scaling parameters
      ('49', '49S' or '49XS').
    min_level: `int` minimum level in SpineNet endpoints.
    max_level: `int` maximum level in SpineNet endpoints.
    block_specs: a list of BlockSpec objects specifying the network
      topology. By default (None), the NAS-discovered architecture is used.
    use_native_resize_op: whether to use tf.image.resize_nearest_neighbor
      for upsampling instead of the broadcast implementation.
    se_ratio: squeeze and excitation ratio for MBConv blocks.
    activation: activation after cross-scale fusion ('relu' or 'swish').
    batch_norm_activation: batch normalization + optional activation op.
    init_drop_connect_rate: `float` initial drop connect rate.
    data_format: "channels_last" or "channels_first".

  Returns:
    A SpineNetMBConv instance configured for `model_id`.

  Raises:
    ValueError: if `model_id` is not a key of SCALING_MAP.
  """
  if model_id not in SCALING_MAP:
    raise ValueError('SpineNetMBConv {} is not a valid architecture.'
                     .format(model_id))
  scaling_params = SCALING_MAP[model_id]
  # BUG FIX: resolve the default here rather than in the signature, where
  # `build_block_specs()` was evaluated once at import time and the
  # resulting list shared by every call.
  if block_specs is None:
    block_specs = build_block_specs()
  return SpineNetMBConv(
      min_level=min_level,
      max_level=max_level,
      block_specs=block_specs,
      endpoints_num_filters=scaling_params['endpoints_num_filters'],
      use_native_resize_op=use_native_resize_op,
      se_ratio=se_ratio,
      block_repeats=scaling_params['block_repeats'],
      filter_size_scale=scaling_params['filter_size_scale'],
      activation=activation,
      batch_norm_activation=batch_norm_activation,
      init_drop_connect_rate=init_drop_connect_rate,
      data_format=data_format)
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
class BaseTool(object):
  """Abstract class for running dynamic error detection tools.

  Always subclass this and implement ToolCommand with framework- and
  tool-specific stuff.
  """

  def __init__(self):
    # Parent dir for the temp log dir; None means the system default
    # temp location is used.
    temp_parent_dir = None
    self.log_parent_dir = ""
    if common.IsWindows():
      # gpu process on Windows Vista+ runs at Low Integrity and can only
      # write to certain directories (http://crbug.com/119131)
      #
      # TODO(bruening): if scripts die in middle and don't clean up temp
      # dir, we'll accumulate files in profile dir. should remove
      # really old files automatically.
      profile = os.getenv("USERPROFILE")
      if profile:
        self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
        if os.path.exists(self.log_parent_dir):
          self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
          temp_parent_dir = self.log_parent_dir
    # Generated every time (even when overridden)
    self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
    self.log_dir = self.temp_dir # overridable by --keep_logs
    self.option_parser_hooks = []
    # TODO(glider): we may not need some of the env vars on some of the
    # platforms.
    self._env = {
      "G_SLICE" : "always-malloc",
      "NSS_DISABLE_UNLOAD" : "1",
      "NSS_DISABLE_ARENA_FREE_LIST" : "1",
      "GTEST_DEATH_TEST_USE_FORK": "1",
    }

  def ToolName(self):
    # Subclasses must return a short tool identifier (e.g. "drmemory")
    # used to name the <tool>.logs directory.
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def Analyze(self, check_sanity=False):
    # Subclasses must post-process the tool logs and return an exit code
    # (non-zero on reported errors).
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def RegisterOptionParserHook(self, hook):
    # Frameworks and tools can add their own flags to the parser.
    self.option_parser_hooks.append(hook)

  def CreateOptionParser(self):
    """Creates the option parser and lets registered hooks extend it."""
    # Defines Chromium-specific flags.
    self._parser = optparse.OptionParser("usage: %prog [options] <program to "
                                         "test>")
    self._parser.disable_interspersed_args()
    self._parser.add_option("-t", "--timeout",
                            dest="timeout", metavar="TIMEOUT", default=100000,
                            help="timeout in seconds for the run (default 100000)")
    self._parser.add_option("", "--build-dir",
                            help="the location of the compiler output")
    self._parser.add_option("", "--source-dir",
                            help="path to top of source tree for this build"
                                 "(used to normalize source paths in baseline)")
    self._parser.add_option("", "--gtest_filter", default="",
                            help="which test case to run")
    self._parser.add_option("", "--gtest_repeat",
                            help="how many times to run each test")
    self._parser.add_option("", "--gtest_print_time", action="store_true",
                            default=False,
                            help="show how long each test takes")
    self._parser.add_option("", "--ignore_exit_code", action="store_true",
                            default=False,
                            help="ignore exit code of the test "
                                 "(e.g. test failures)")
    self._parser.add_option("", "--keep_logs", action="store_true",
                            default=False,
                            help="store memory tool logs in the <tool>.logs "
                                 "directory instead of /tmp.\nThis can be "
                                 "useful for tool developers/maintainers.\n"
                                 "Please note that the <tool>.logs directory "
                                 "will be clobbered on tool startup.")
    # To add framework- or tool-specific flags, please add a hook using
    # RegisterOptionParserHook in the corresponding subclass.
    # See ValgrindTool for an example.
    for hook in self.option_parser_hooks:
      hook(self, self._parser)

  def ParseArgv(self, args):
    """Parses argv, separating known flags, tool flags and program args."""
    self.CreateOptionParser()
    # self._tool_flags will store those tool flags which we don't parse
    # manually in this script.
    self._tool_flags = []
    known_args = []
    """ We assume that the first argument not starting with "-" is a program
    name and all the following flags should be passed to the program.
    TODO(timurrrr): customize optparse instead
    """
    while len(args) > 0 and args[0][:1] == "-":
      arg = args[0]
      if (arg == "--"):
        break
      # Flags the parser knows about go to optparse; the rest go to the tool.
      if self._parser.has_option(arg.split("=")[0]):
        known_args += [arg]
      else:
        self._tool_flags += [arg]
      args = args[1:]
    if len(args) > 0:
      known_args += args
    self._options, self._args = self._parser.parse_args(known_args)
    self._timeout = int(self._options.timeout)
    self._source_dir = self._options.source_dir
    if self._options.keep_logs:
      # log_parent_dir has trailing slash if non-empty
      self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
      if os.path.exists(self.log_dir):
        shutil.rmtree(self.log_dir)
      os.mkdir(self.log_dir)
      logging.info("Logs are in " + self.log_dir)
    self._ignore_exit_code = self._options.ignore_exit_code
    # Forward gtest flags to the program under test.
    if self._options.gtest_filter != "":
      self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
    if self._options.gtest_repeat:
      self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_print_time:
      self._args.append("--gtest_print_time")
    return True

  def Setup(self, args):
    """Prepares the run; returns True on success. Default: just parse args."""
    return self.ParseArgv(args)

  def ToolCommand(self):
    # Subclasses must return the full command line (list) to execute.
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def Cleanup(self):
    # You may override it in the tool-specific subclass
    pass

  def Execute(self):
    """ Execute the app to be tested after successful instrumentation.
    Full execution command-line provided by subclassers via proc."""
    logging.info("starting execution...")
    proc = self.ToolCommand()
    # Export the memory-tool environment before launching the subprocess.
    for var in self._env:
      common.PutEnvAndLog(var, self._env[var])
    return common.RunSubprocess(proc, self._timeout)

  def RunTestsAndAnalyze(self, check_sanity):
    """Runs the test binary then the analyzer; returns a combined exit code."""
    exec_retcode = self.Execute()
    analyze_retcode = self.Analyze(check_sanity)
    # Analysis failures take precedence over test failures.
    if analyze_retcode:
      logging.error("Analyze failed.")
      logging.info("Search the log for '[ERROR]' to see the error reports.")
      return analyze_retcode
    if exec_retcode:
      if self._ignore_exit_code:
        logging.info("Test execution failed, but the exit code is ignored.")
      else:
        logging.error("Test execution failed.")
        return exec_retcode
    else:
      logging.info("Test execution completed successfully.")
    if not analyze_retcode:
      logging.info("Analysis completed successfully.")
    return 0

  def Main(self, args, check_sanity, min_runtime_in_seconds):
    """Call this to run through the whole process: Setup, Execute, Analyze"""
    start_time = datetime.datetime.now()
    retcode = -1
    if self.Setup(args):
      retcode = self.RunTestsAndAnalyze(check_sanity)
      # Remove the temp log dir even on failure; keep_logs dirs survive.
      shutil.rmtree(self.temp_dir, ignore_errors=True)
      self.Cleanup()
    else:
      logging.error("Setup failed")
    end_time = datetime.datetime.now()
    runtime_in_seconds = (end_time - start_time).seconds
    # Python 2 integer division: these are floor divisions on ints.
    hours = runtime_in_seconds / 3600
    seconds = runtime_in_seconds % 3600
    minutes = seconds / 60
    seconds = seconds % 60
    logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
    # Guard against runs that finished suspiciously fast (e.g. crashed early).
    if (min_runtime_in_seconds > 0 and
        runtime_in_seconds < min_runtime_in_seconds):
      logging.error("Layout tests finished too quickly. "
                    "It should have taken at least %d seconds. "
                    "Something went wrong?" % min_runtime_in_seconds)
      retcode = -1
    return retcode

  def Run(self, args, module, min_runtime_in_seconds=0):
    """Entry point: runs Main with sanity checking enabled for some modules."""
    MODULES_TO_SANITY_CHECK = ["base"]
    check_sanity = module in MODULES_TO_SANITY_CHECK
    return self.Main(args, check_sanity, min_runtime_in_seconds)
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
It is not very mature at the moment, some things might not work properly.
"""
  def __init__(self, full_mode, pattern_mode):
    """Configures Dr. Memory mode flags and registers its option hook.

    Args:
      full_mode: whether to run in full mode (also enables the *_full.txt
        suppression files in ToolCommand).
      pattern_mode: whether to run in pattern mode.
    """
    super(DrMemory, self).__init__()
    self.full_mode = full_mode
    self.pattern_mode = pattern_mode
    # Let the shared BaseTool option parser pick up Dr. Memory flags.
    self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
  def ToolName(self):
    """Returns the tool identifier used for log directory names."""
    return "drmemory"
  def ExtendOptionParser(self, parser):
    """Adds Dr. Memory-specific flags to the shared option parser."""
    parser.add_option("", "--suppressions", default=[],
                      action="append",
                      help="path to a drmemory suppression file")
    parser.add_option("", "--follow_python", action="store_true",
                      default=False, dest="follow_python",
                      help="Monitor python child processes. If off, neither "
                      "python children nor any children of python children "
                      "will be monitored.")
    parser.add_option("", "--indirect_pdfium_test", action="store_true",
                      default=False,
                      help="set --wrapper rather than running Dr. Memory "
                      "directly.")
    parser.add_option("", "--use_debug", action="store_true",
                      default=False, dest="use_debug",
                      help="Run Dr. Memory debug build")
    parser.add_option("", "--trace_children", action="store_true",
                      default=True,
                      help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (https://github.com/DynamoRIO/drmemory/issues/320) and
# tcmalloc (https://github.com/DynamoRIO/drmemory/issues/314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # https://github.com/DynamoRIO/drmemory/issues/540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (https://github.com/DynamoRIO/drmemory/issues/334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
# Anyways, if we omit -symcache_dir the -logdir's value is used which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# disable leak scan for now
proc += ["-no_count_leaks", "-no_leak_scan"]
# disable warnings about unaddressable prefetches
proc += ["-no_check_prefetch"]
# crbug.com/413215, no heap mismatch check for Windows release build binary
if common.IsWindows() and "Release" in self._options.build_dir:
proc += ["-no_check_delete_mismatch"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect_pdfium_test:
wrapper = " ".join(proc)
logging.info("pdfium wrapper = " + wrapper)
proc = self._args
proc += ["--wrapper", wrapper]
return proc
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
  def Analyze(self, check_sanity=False):
    """Analyze Dr.Memory result logs and print a report per test case.

    Returns 0 when no errors were reported; per-wrapper return codes are
    OR-ed together in the pdfium path so any failing report makes the
    overall result non-zero.
    """
    # Use one analyzer for all the log files to avoid printing duplicate reports
    #
    # TODO(timurrrr): unify this with Valgrind and other tools when we have
    # https://github.com/DynamoRIO/drmemory/issues/684
    analyzer = drmemory_analyze.DrMemoryAnalyzer()
    ret = 0
    if not self._options.indirect_pdfium_test:
      # Simple path: one combined report over every results.txt in log_dir.
      filenames = glob.glob(self.log_dir + "/*/results.txt")
      ret = analyzer.Report(filenames, None, check_sanity)
    else:
      testcases = glob.glob(self.log_dir + "/testcase.*.logs")
      # If we have browser wrapper, the per-test logdirs are named as
      # "testcase.wrapper_PID.name".
      # Let's extract the list of wrapper_PIDs and name it ppids.
      # NOTE: ppids may contain '_', i.e. they are not ints!
      ppids = set([f.split(".")[-2] for f in testcases])
      for ppid in ppids:
        testcase_name = None
        # Best-effort lookup of the human-readable test name; a missing
        # .name file is not an error (we fall back to the PID hint below).
        try:
          f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
          testcase_name = f.read().strip()
          f.close()
        except IOError:
          pass
        print "====================================================="
        print " Below is the report for drmemory wrapper PID=%s." % ppid
        if testcase_name:
          print " It was used while running the `%s` test." % testcase_name
        else:
          # TODO(timurrrr): hm, the PID line is suppressed on Windows...
          print " You can find the corresponding test"
          print " by searching the above log for 'PID=%s'" % ppid
        sys.stdout.flush()
        ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
                                   (self.log_dir, ppid))
        # Accumulate failures across per-test reports with bitwise OR.
        ret |= analyzer.Report(ppid_filenames, testcase_name, False)
        print "====================================================="
        sys.stdout.flush()
    logging.info("Please see http://dev.chromium.org/developers/how-tos/"
                 "using-drmemory for the info on Dr. Memory")
    return ret
class ToolFactory:
  """Maps a command-line tool name onto a configured DrMemory instance."""

  # Tool name -> (full_mode, pattern_mode) constructor arguments.
  # TODO(timurrrr): remove support for "drmemory" when buildbots are
  # switched to drmemory_light OR make drmemory==drmemory_full the default
  # mode when the tool is mature enough.
  _DRMEMORY_MODES = {
      "drmemory": (False, False),
      "drmemory_light": (False, False),
      "drmemory_full": (True, False),
      "drmemory_pattern": (False, True),
  }

  def Create(self, tool_name):
    """Return a tool object for |tool_name|; raise RuntimeError if unknown."""
    if tool_name in self._DRMEMORY_MODES:
      full_mode, pattern_mode = self._DRMEMORY_MODES[tool_name]
      return DrMemory(full_mode, pattern_mode)
    try:
      platform_name = common.PlatformNames()[0]
    except common.NotImplementedError:
      platform_name = sys.platform + "(Unknown)"
    raise RuntimeError("Unknown tool (tool=%s, platform=%s)" % (tool_name,
                                                                platform_name))
def CreateTool(tool):
  """Convenience wrapper: build the named tool via ToolFactory."""
  factory = ToolFactory()
  return factory.Create(tool)
| |
#!/usr/bin/env python
import argparse
import csv, os, time
import psycopg2 # psycopg2 v2.5.1
import sys
sys.path.append('../modules')
from result import Result
# Script authorship / packaging metadata.
__author__ = "Wade Schulz, Donn Felker, Brent Nelson"
__credits__ = ["Wade Schulz", "Donn Felker", "Brent Nelson"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "Wade Schulz"
__email__ = "wade.schulz@gmail.com"
__status__ = "Research"
# Get command line arguments.
# NOTE: all options are optional; hard-coded defaults below are used when a
# flag is omitted.
parser = argparse.ArgumentParser(description='Load SNP and locus data into PostgreSQL')
parser.add_argument('--dev', action='store_true', help='Only load chromosome 21 for development testing')
parser.add_argument('--db', type=str, help='PostgreSQL database name')
parser.add_argument('--username', type=str, help='PostgreSQL username')
parser.add_argument('--password', type=str, help='PostgreSQL password')
parser.add_argument('--tag', type=str, help='Tag to place in results file')
parser.add_argument('--path', help='Path to chromosome data (dev: ../../res/; otherwise default working directory)')
parser.add_argument('--start', type=str, help='Chromosome to start load from')
parser.add_argument('--indexes', action='store_true', help='Create indexes')
parser.add_argument('--queries', action='store_true', help='Run queries')
args = parser.parse_args()
# Set script version
scriptVersion = "2.0"

# Defaults, each overridden below by the corresponding CLI flag when given.
# Dev mode loads only chromosome 21 and points at the bundled test data
# (an explicit --path still wins over the dev default).
dev = bool(args.dev)
path = '../../res/' if args.dev else ''
if args.path is not None:
    path = args.path
databaseName = args.db if args.db is not None else 'snp_research'
username = args.username if args.username is not None else 'dev'
password = args.password if args.password is not None else ''
tag = args.tag if args.tag is not None else ''
start = args.start if args.start is not None else '1'
# store_true flags are never None, so these mirror the flags directly.
createIndexes = args.indexes if args.indexes is not None else False
runQueries = args.queries if args.queries is not None else False
# Open results file and print header.
# BUG FIX: the original condition was `if resultsFileName != ""`, which is
# always true (the base name is hard-coded just above), so running without
# --tag produced the awkward file name "results-pgsql-.txt".  Only append the
# tag separator when a tag was actually supplied.
resultsFileName = 'results-pgsql'
if tag != "":
    resultsFileName += '-' + tag
resultsFileName += '.txt'
resultsFile = open(resultsFileName, 'w')
resultsFile.write(scriptVersion + '\n')
result = Result()
resultsFile.write(result.toHeader() + '\n')
# Per-chromosome data file name templates ({0} is the chromosome label).
snpFilePath = 'snpData-chr{0}.txt'
lociFilePath = 'lociData-chr{0}.txt'

# Chromosome list: dev mode loads only chr21, otherwise the full genome.
chromosomes = ["21"]  # dev list
if dev is False:
    chromosomes = ["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","X","Y","MT"]  # complete list
    if start != "1":
        # Allow restart from anywhere in the chromosome list, sequentially as
        # ordered above: keep everything from the first match onward (an
        # unknown `start` value leaves an empty work list, as before).
        resume_idx = None
        for idx, label in enumerate(chromosomes):
            if label == start:
                resume_idx = idx
                break
        chromosomes = chromosomes[resume_idx:] if resume_idx is not None else []
# Create Postgres database and tables if they do not exist.
# For the initial connection, connect to the user's own database and then
# create the experimental db.
# NOTE: databaseName comes from the CLI and is interpolated directly into the
# DDL (identifiers cannot be bound as query parameters) -- only run this
# script with trusted arguments.
postgresConnection = psycopg2.connect("dbname=" + username + " user=" + username)
postgresConnection.autocommit = True
createDbCursor = postgresConnection.cursor()
# BUG FIX: plain DROP DATABASE aborts the whole script on the very first run,
# when the database does not exist yet; IF EXISTS makes the reset idempotent.
createDbCursor.execute("DROP DATABASE IF EXISTS " + databaseName)
createDbCursor.execute("CREATE DATABASE " + databaseName)
createDbCursor.close()
postgresConnection.close() # Reconnect with database name

# Reopen connection to the experimental database to create tables and begin inserts
postgresConnection = psycopg2.connect("dbname=" + databaseName + " user=" + username)
createDbCursor = postgresConnection.cursor()

# DDL: snp holds one row per variant; locus holds child rows referencing
# snp via the snp_id foreign key.
TABLES = {}
TABLES['snp'] = (
    "CREATE TABLE IF NOT EXISTS snp ("
    " id serial PRIMARY KEY,"
    " rsid varchar,"
    " chr varchar,"
    " has_sig boolean"
    ");")
TABLES['locus'] = (
    "CREATE TABLE IF NOT EXISTS locus("
    " id serial PRIMARY KEY,"
    " mrna_acc varchar,"
    " gene varchar,"
    " class varchar,"
    " snp_id integer,"
    " CONSTRAINT idx_snp FOREIGN KEY (snp_id) REFERENCES snp (id) ON DELETE NO ACTION ON UPDATE NO ACTION"
    ");")
# items() behaves the same here on Python 2 and 3; iteritems() is Py2-only.
for name, ddl in TABLES.items():
    createDbCursor.execute(ddl)
postgresConnection.commit()

# Disable triggers/constraints on tables to speed up the bulk load; they are
# re-enabled after loading completes.
createDbCursor.execute("ALTER TABLE snp DISABLE trigger ALL;")
createDbCursor.execute("ALTER TABLE locus DISABLE trigger ALL;")
createDbCursor.close()
# Containers reused across chromosomes for the generated SQL inserts.
snpInserts = {} # Dictionary for rsid/insert for SNP data
lociInserts = [] # Array for loci insert queries
rsidList = {} # Dictionary of RSIDs that will also hold the
              # primary key for each SNP in SQL
# Load each chromosome into database.
# Per chromosome: read the SNP file, insert SNPs (capturing each generated
# primary key), then read the loci file and insert loci linked by that key.
# Timing for each phase is collected in a Result and appended to the results
# file.
for curChr in chromosomes:
    result = Result()
    result.method = "pgsql"
    result.tag = tag
    print "Chromosome " + str(curChr)
    result.chromosome = str(curChr)
    # Set file paths for current chromosome.
    # NOTE(review): the '\\' separator is Windows-specific -- on POSIX this
    # produces literal backslashes in the path; confirm the intended platform.
    curSnpFilePath = snpFilePath.format(curChr)
    curLociFilePath = lociFilePath.format(curChr)
    if len(path) > 0:
        curSnpFilePath = path.rstrip('\\').rstrip('/') + '\\' + curSnpFilePath
        curLociFilePath = path.rstrip('\\').rstrip('/') + '\\' + curLociFilePath
    # Clear dictionaries for loading multiple chromosomes
    snpInserts.clear()
    lociInserts = []
    rsidList.clear()
    # Print status and flush stdout for nohup
    print "Chromosome " + str(curChr) + ". Reading SNP Data"
    result.snpLoadStart = time.time()
    sys.stdout.flush()
    # Read in data from the tab-separated SNP file and create insert
    # statements.  Expected columns: rsid, chromosome, clinical-significance.
    # NOTE(review): values are spliced into SQL with str.format -- safe only
    # as long as the input files are trusted (no quoting/escaping is done).
    with open(curSnpFilePath,'r') as csvfile:
        data = csv.reader(csvfile,delimiter='\t')
        for row in data:
            if(len(row) == 3):
                # Any non-empty value other than 'false' counts as clinically
                # significant.
                hasSig = False
                if row[2] != '' and row[2] != 'false':
                    hasSig = True
                rsidList[row[0]] = 0
                insStr = "INSERT INTO snp (rsid, chr, has_sig) VALUES ('{0}', '{1}', {2}) RETURNING id".format(row[0], row[1], hasSig)
                snpInserts[row[0]] = insStr
    # Data for reporting
    result.snpLoadEnd = time.time()
    result.totalSnps = len(snpInserts)
    # Insert SNP data into postgres
    cursor = postgresConnection.cursor()
    print "Chromosome " + str(curChr) + ". Inserting SNP Data."
    sys.stdout.flush()
    # Log current run start time
    result.snpInsertStart = time.time()
    # For each snp, insert record and then grab primary key via RETURNING id.
    # (iteritems() is Python-2 only.)
    for rsid,snp in snpInserts.iteritems():
        cursor.execute(snp)
        rsidList[rsid] = cursor.fetchone()[0]
    # Commit all inserts to pgsql and grab end time
    postgresConnection.commit()
    # Log completed time, close pgsql cursor
    result.snpInsertEnd=time.time()
    cursor.close()
    # Clear list of SNPs to free up memory
    snpInserts.clear()
    print "Chromosome " + str(curChr) + ". Reading loci Data."
    result.lociLoadStart = time.time()
    # Now that we have primary keys for each SNP, read in loci data.
    # Expected columns: rsid, mrna_acc, gene, class.
    with open(curLociFilePath,'r') as csvfile:
        data = csv.reader(csvfile,delimiter='\t')
        for row in data:
            if(len(row) == 4):
                # Load loci in pgsql statements
                if row[0] in rsidList and rsidList[row[0]] > 0: # If RSID value is present, load with PK
                    insStr = "INSERT INTO locus (mrna_acc, gene, class, snp_id) VALUES ('{0}', '{1}', '{2}', {3})".format(row[1], row[2], row[3], rsidList[row[0]])
                    lociInserts.append(insStr)
    # Data for reporting
    result.lociLoadEnd = time.time()
    result.totalLoci = len(lociInserts)
    # Create new cursor, enter loci data into pgsql
    cursor = postgresConnection.cursor()
    print "Chromosome " + str(curChr) + ". Inserting loci data."
    # Log current run start time and number of loci
    result.lociInsertStart = time.time()
    # Insert each locus
    for locus in lociInserts:
        cursor.execute(locus)
    # Commit data to pgsql
    postgresConnection.commit()
    # Log end time and total pgsql time
    result.lociInsertEnd = time.time()
    # Close pgsql cursor
    cursor.close()
    print result.toTerm()
    resultsFile.write(result.toString() + '\n')
    sys.stdout.flush()
# Create new cursor, create indexes and run test queries.
cursor = postgresConnection.cursor()
# Turn triggers back on and create the FK index, since PGSQL does not
# automatically index foreign keys.
print "Turning on key checks..."
cursor.execute("ALTER TABLE snp ENABLE trigger ALL;")
cursor.execute("ALTER TABLE locus ENABLE trigger ALL;")
cursor.execute("CREATE INDEX idx_snpid_fk ON locus (snp_id)")
# Create indexes if requested in arguments; each index build is timed
# individually and the durations written to the results file.
if createIndexes:
    result = Result()
    result.method = "pgsql-Idx"
    result.tag = tag
    rsidIndex = "CREATE UNIQUE INDEX idx_rsid ON snp (rsid)"
    clinIndex = "CREATE INDEX idx_clin ON snp (has_sig)"
    geneIndex = "CREATE INDEX idx_gene ON locus (gene)"
    print "Creating RSID index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(rsidIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxRsid = idxEnd - idxStart
    print "Creating ClinSig index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(clinIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxClinSig = idxEnd - idxStart
    print "Creating Gene index..."
    sys.stdout.flush()
    idxStart = time.time()
    cursor.execute(geneIndex)
    postgresConnection.commit()
    idxEnd = time.time()
    result.idxGene = idxEnd - idxStart
    resultsFile.write(result.toString() + '\n')
# Run queries if requested in args: ten timed passes over the four benchmark
# queries (lookup by rsid, count by clinical significance, counts by gene).
if runQueries:
    for z in range(1,11):
        result = Result()
        result.method = "pgsql-Qry" + str(z)
        result.tag = tag
        print "Running queries, count " + str(z)
        idxStart = time.time()
        cursor.execute("SELECT * FROM locus l, snp s WHERE l.snp_id = s.id AND s.rsid = 'rs8788'")
        idxEnd = time.time()
        result.qryByRsid = idxEnd - idxStart
        idxStart = time.time()
        cursor.execute("SELECT count(s.id) FROM locus l, snp s WHERE l.snp_id = s.id AND s.has_sig = true")
        idxEnd = time.time()
        result.qryByClinSig = idxEnd - idxStart
        idxStart = time.time()
        cursor.execute("SELECT count(distinct s.rsid) FROM locus l, snp s WHERE l.snp_id = s.id AND l.gene = 'GRIN2B'")
        idxEnd = time.time()
        result.qryByGene = idxEnd - idxStart
        idxStart = time.time()
        cursor.execute("SELECT count(distinct s.rsid) FROM locus l, snp s WHERE l.snp_id = s.id AND l.gene = 'GRIN2B' AND s.has_sig = true")
        idxEnd = time.time()
        result.qryByGeneSig = idxEnd - idxStart
        resultsFile.write(result.toString() + '\n')
# Close pgsql cursor, results file and connection.
cursor.close()
resultsFile.close()
postgresConnection.close()
print "Run complete."
| |
################## http://www.pygame.org/wiki/2DVectorClass ##################
import operator
import math
class Vec2d(object):
    """2d vector class, supports vector and scalar operators,
    and also provides a bunch of high level functions
    """
    # Only x/y slots: keeps instances small and forbids stray attributes.
    __slots__ = ['x', 'y']

    def __init__(self, x_or_pair, y=None):
        """Accept Vec2d(x, y) or a single length-2 sequence Vec2d((x, y))."""
        if y is None:
            self.x = x_or_pair[0]
            self.y = x_or_pair[1]
        else:
            self.x = x_or_pair
            self.y = y

    def __len__(self):
        return 2

    def __getitem__(self, key):
        """Sequence access: v[0] is x, v[1] is y."""
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise IndexError("Invalid subscript "+str(key)+" to Vec2d")

    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise IndexError("Invalid subscript "+str(key)+" to Vec2d")

    # String representation (for debugging)
    def __repr__(self):
        return 'Vec2d(%s, %s)' % (self.x, self.y)

    # Comparison: equal to any 2-element indexable with matching components.
    def __eq__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x == other[0] and self.y == other[1]
        else:
            return False

    def __ne__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x != other[0] or self.y != other[1]
        else:
            return True

    def __nonzero__(self):
        return bool(self.x or self.y)

    # Generic operator handlers
    def _o2(self, other, f):
        "Any two-operator operation where the left operand is a Vec2d"
        if isinstance(other, Vec2d):
            return Vec2d(f(self.x, other.x),
                         f(self.y, other.y))
        elif (hasattr(other, "__getitem__")):
            return Vec2d(f(self.x, other[0]),
                         f(self.y, other[1]))
        else:
            return Vec2d(f(self.x, other),
                         f(self.y, other))

    def _r_o2(self, other, f):
        "Any two-operator operation where the right operand is a Vec2d"
        if (hasattr(other, "__getitem__")):
            return Vec2d(f(other[0], self.x),
                         f(other[1], self.y))
        else:
            return Vec2d(f(other, self.x),
                         f(other, self.y))

    def _io(self, other, f):
        "inplace operator"
        if (hasattr(other, "__getitem__")):
            self.x = f(self.x, other[0])
            self.y = f(self.y, other[1])
        else:
            self.x = f(self.x, other)
            self.y = f(self.y, other)
        return self

    # Addition
    def __add__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x + other.x, self.y + other.y)
        elif hasattr(other, "__getitem__"):
            return Vec2d(self.x + other[0], self.y + other[1])
        else:
            return Vec2d(self.x + other, self.y + other)
    __radd__ = __add__

    def __iadd__(self, other):
        if isinstance(other, Vec2d):
            self.x += other.x
            self.y += other.y
        elif hasattr(other, "__getitem__"):
            self.x += other[0]
            self.y += other[1]
        else:
            self.x += other
            self.y += other
        return self

    # Subtraction
    def __sub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x - other.x, self.y - other.y)
        elif (hasattr(other, "__getitem__")):
            return Vec2d(self.x - other[0], self.y - other[1])
        else:
            return Vec2d(self.x - other, self.y - other)

    def __rsub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(other.x - self.x, other.y - self.y)
        if (hasattr(other, "__getitem__")):
            return Vec2d(other[0] - self.x, other[1] - self.y)
        else:
            return Vec2d(other - self.x, other - self.y)

    def __isub__(self, other):
        if isinstance(other, Vec2d):
            self.x -= other.x
            self.y -= other.y
        elif (hasattr(other, "__getitem__")):
            self.x -= other[0]
            self.y -= other[1]
        else:
            self.x -= other
            self.y -= other
        return self

    # Multiplication (component-wise for vector operands)
    def __mul__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x*other.x, self.y*other.y)
        if (hasattr(other, "__getitem__")):
            return Vec2d(self.x*other[0], self.y*other[1])
        else:
            return Vec2d(self.x*other, self.y*other)
    __rmul__ = __mul__

    def __imul__(self, other):
        if isinstance(other, Vec2d):
            self.x *= other.x
            self.y *= other.y
        elif (hasattr(other, "__getitem__")):
            self.x *= other[0]
            self.y *= other[1]
        else:
            self.x *= other
            self.y *= other
        return self

    # Division (__div__ variants are only used by Python 2's `/` operator)
    def __div__(self, other):
        return self._o2(other, operator.div)
    def __rdiv__(self, other):
        return self._r_o2(other, operator.div)
    def __idiv__(self, other):
        return self._io(other, operator.div)

    def __floordiv__(self, other):
        return self._o2(other, operator.floordiv)
    def __rfloordiv__(self, other):
        return self._r_o2(other, operator.floordiv)
    def __ifloordiv__(self, other):
        return self._io(other, operator.floordiv)

    def __truediv__(self, other):
        return self._o2(other, operator.truediv)
    def __rtruediv__(self, other):
        return self._r_o2(other, operator.truediv)
    def __itruediv__(self, other):
        # BUG FIX: this previously delegated to operator.floordiv, so
        # `v /= n` silently truncated both components to whole numbers.
        # In-place true division must match __truediv__.
        return self._io(other, operator.truediv)

    # Modulo
    def __mod__(self, other):
        return self._o2(other, operator.mod)
    def __rmod__(self, other):
        return self._r_o2(other, operator.mod)

    def __divmod__(self, other):
        return self._o2(other, operator.divmod)
    def __rdivmod__(self, other):
        return self._r_o2(other, operator.divmod)

    # Exponentiation
    def __pow__(self, other):
        return self._o2(other, operator.pow)
    def __rpow__(self, other):
        return self._r_o2(other, operator.pow)

    # Bitwise operators
    def __lshift__(self, other):
        return self._o2(other, operator.lshift)
    def __rlshift__(self, other):
        return self._r_o2(other, operator.lshift)
    def __rshift__(self, other):
        return self._o2(other, operator.rshift)
    def __rrshift__(self, other):
        return self._r_o2(other, operator.rshift)
    def __and__(self, other):
        return self._o2(other, operator.and_)
    __rand__ = __and__
    def __or__(self, other):
        return self._o2(other, operator.or_)
    __ror__ = __or__
    def __xor__(self, other):
        return self._o2(other, operator.xor)
    __rxor__ = __xor__

    # Unary operations
    def __neg__(self):
        return Vec2d(operator.neg(self.x), operator.neg(self.y))
    def __pos__(self):
        return Vec2d(operator.pos(self.x), operator.pos(self.y))
    def __abs__(self):
        return Vec2d(abs(self.x), abs(self.y))
    def __invert__(self):
        return Vec2d(-self.x, -self.y)

    # vectory functions
    def get_length_sqrd(self):
        """Squared magnitude (cheaper than get_length; no sqrt)."""
        return self.x**2 + self.y**2

    def get_length(self):
        """Euclidean magnitude of the vector."""
        return math.sqrt(self.x**2 + self.y**2)
    def __setlength(self, value):
        # Scale both components so the magnitude becomes `value`
        # (direction preserved; undefined for the zero vector).
        length = self.get_length()
        self.x *= value/length
        self.y *= value/length
    length = property(get_length, __setlength, None, "gets or sets the magnitude of the vector")

    def rotate(self, angle_degrees):
        """Rotate this vector in place by angle_degrees (counter-clockwise)."""
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        self.x = x
        self.y = y

    def rotated(self, angle_degrees):
        """Return a copy of this vector rotated by angle_degrees."""
        radians = math.radians(angle_degrees)
        cos = math.cos(radians)
        sin = math.sin(radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        return Vec2d(x, y)

    def get_angle(self):
        """Angle of the vector in degrees; 0 for the zero vector."""
        if (self.get_length_sqrd() == 0):
            return 0
        return math.degrees(math.atan2(self.y, self.x))
    def __setangle(self, angle_degrees):
        # Re-point the vector at the new angle, preserving its magnitude.
        self.x = self.length
        self.y = 0
        self.rotate(angle_degrees)
    angle = property(get_angle, __setangle, None, "gets or sets the angle of a vector")

    def get_angle_between(self, other):
        """Signed angle in degrees from this vector to `other`."""
        cross = self.x*other[1] - self.y*other[0]
        dot = self.x*other[0] + self.y*other[1]
        return math.degrees(math.atan2(cross, dot))

    def normalized(self):
        """Return a unit-length copy (or a plain copy of the zero vector)."""
        length = self.length
        if length != 0:
            return self/length
        return Vec2d(self)

    def normalize_return_length(self):
        """Normalize in place and return the former magnitude."""
        length = self.length
        if length != 0:
            self.x /= length
            self.y /= length
        return length

    def perpendicular(self):
        """Vector rotated 90 degrees counter-clockwise."""
        return Vec2d(-self.y, self.x)

    def perpendicular_normal(self):
        """Unit-length perpendicular (copy of self for the zero vector)."""
        length = self.length
        if length != 0:
            return Vec2d(-self.y/length, self.x/length)
        return Vec2d(self)

    def dot(self, other):
        """Dot product with any 2-element indexable; always returns float."""
        return float(self.x*other[0] + self.y*other[1])

    def get_distance(self, other):
        return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)

    def get_dist_sqrd(self, other):
        return (self.x - other[0])**2 + (self.y - other[1])**2

    def projection(self, other):
        """Project this vector onto `other` (undefined for zero `other`)."""
        other_length_sqrd = other[0]*other[0] + other[1]*other[1]
        projected_length_times_other_length = self.dot(other)
        return other*(projected_length_times_other_length/other_length_sqrd)

    def cross(self, other):
        """2d cross product (z component of the 3d cross product)."""
        return self.x*other[1] - self.y*other[0]

    def interpolate_to(self, other, range):
        """Linear interpolation: range=0 gives self, range=1 gives other."""
        return Vec2d(self.x + (other[0] - self.x)*range, self.y + (other[1] - self.y)*range)

    def convert_to_basis(self, x_vector, y_vector):
        return Vec2d(self.dot(x_vector)/x_vector.get_length_sqrd(), self.dot(y_vector)/y_vector.get_length_sqrd())

    # Pickle support
    def __getstate__(self):
        return [self.x, self.y]

    def __setstate__(self, state):
        # (parameter renamed from `dict` to avoid shadowing the builtin)
        self.x, self.y = state
########################################################################
## Unit Testing ##
########################################################################
if __name__ == "__main__":
    import unittest
    import pickle

    ####################################################################
    # NOTE(review): assert_ / assertEquals / assertAlmostEquals below are the
    # old Python 2-era unittest aliases (removed in modern Python 3); kept
    # as-is for this codebase.
    class UnitTestVec2D(unittest.TestCase):
        def setUp(self):
            pass

        def testCreationAndAccess(self):
            v = Vec2d(111,222)
            self.assert_(v.x == 111 and v.y == 222)
            v.x = 333
            v[1] = 444
            self.assert_(v[0] == 333 and v[1] == 444)

        def testMath(self):
            v = Vec2d(111,222)
            self.assertEqual(v + 1, Vec2d(112,223))
            self.assert_(v - 2 == [109,220])
            self.assert_(v * 3 == (333,666))
            self.assert_(v / 2.0 == Vec2d(55.5, 111))
            self.assert_(v / 2 == (55.5, 111))
            self.assert_(v ** Vec2d(2,3) == [12321, 10941048])
            self.assert_(v + [-11, 78] == Vec2d(100, 300))
            self.assert_(v / [10,2] == [11.1,111])

        def testReverseMath(self):
            v = Vec2d(111,222)
            self.assert_(1 + v == Vec2d(112,223))
            self.assert_(2 - v == [-109,-220])
            self.assert_(3 * v == (333,666))
            self.assert_([222,888] / v == [2,4])
            self.assert_([111,222] ** Vec2d(2,3) == [12321, 10941048])
            self.assert_([-11, 78] + v == Vec2d(100, 300))

        def testUnary(self):
            v = Vec2d(111,222)
            v = -v
            self.assert_(v == [-111,-222])
            v = abs(v)
            self.assert_(v == [111,222])

        def testLength(self):
            v = Vec2d(3,4)
            self.assert_(v.length == 5)
            self.assert_(v.get_length_sqrd() == 25)
            self.assert_(v.normalize_return_length() == 5)
            self.assert_(v.length == 1)
            v.length = 5
            self.assert_(v == Vec2d(3,4))
            v2 = Vec2d(10, -2)
            self.assert_(v.get_distance(v2) == (v - v2).get_length())

        def testAngles(self):
            v = Vec2d(0, 3)
            self.assertEquals(v.angle, 90)
            v2 = Vec2d(v)
            v.rotate(-90)
            self.assertEqual(v.get_angle_between(v2), 90)
            v2.angle -= 90
            self.assertEqual(v.length, v2.length)
            self.assertEquals(v2.angle, 0)
            self.assertEqual(v2, [3, 0])
            # NOTE(review): assert_ receives two positional arguments here, so
            # .00001 acts as a failure message rather than a tolerance -- this
            # line cannot fail while the first argument is truthy.
            self.assert_((v - v2).length, .00001)
            self.assertEqual(v.length, v2.length)
            v2.rotate(300)
            self.assertAlmostEquals(v.get_angle_between(v2), -60)
            v2.rotate(v2.get_angle_between(v))
            angle = v.get_angle_between(v2)
            self.assertAlmostEquals(v.get_angle_between(v2), 0)

        def testHighLevel(self):
            basis0 = Vec2d(5.0, 0)
            basis1 = Vec2d(0, .5)
            v = Vec2d(10, 1)
            self.assert_(v.convert_to_basis(basis0, basis1) == [2, 2])
            self.assert_(v.projection(basis0) == (10, 0))
            self.assert_(basis0.dot(basis1) == 0)

        def testCross(self):
            lhs = Vec2d(1, .5)
            rhs = Vec2d(4,6)
            self.assert_(lhs.cross(rhs) == 4)

        def testComparison(self):
            int_vec = Vec2d(3, -2)
            flt_vec = Vec2d(3.0, -2.0)
            zero_vec = Vec2d(0, 0)
            self.assert_(int_vec == flt_vec)
            self.assert_(int_vec != zero_vec)
            self.assert_((flt_vec == zero_vec) == False)
            self.assert_((flt_vec != int_vec) == False)
            self.assert_(int_vec == (3, -2))
            self.assert_(int_vec != [0, 0])
            self.assert_(int_vec != 5)
            self.assert_(int_vec != [3, -2, -5])

        def testInplace(self):
            inplace_vec = Vec2d(5, 13)
            inplace_ref = inplace_vec
            inplace_src = Vec2d(inplace_vec)
            inplace_vec *= .5
            inplace_vec += .5
            inplace_vec /= (3, 6)
            inplace_vec += Vec2d(-1, -1)
            self.assertEquals(inplace_vec, inplace_ref)

        def testPickle(self):
            testvec = Vec2d(5, .3)
            testvec_str = pickle.dumps(testvec)
            loaded_vec = pickle.loads(testvec_str)
            self.assertEquals(testvec, loaded_vec)

    ####################################################################
    unittest.main()
########################################################################
| |
#!/usr/bin/env python
# Copyright 2019, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from socket import gaierror, gethostbyname
from urlparse import urlparse
from ansible.module_utils.basic import AnsibleModule # noqa: ignore=H303
from keystoneauth1.exceptions import MissingRequiredOptions
import netaddr
from openstack import connect
DOCUMENTATION = """
---
module: service_discovery
short_description:
- Discovery module for MaaS that uses the OpenStack service catalog.
description:
- This module utilizes an OpenStack SDK connection to obtain the service
catalog and create a set of facts based on the endpoint data.
options:
raxdc:
description:
- A boolean identifying whether the deployment is in a Rackspace
data center (RDC) or customer data center (CDC).
required: true
internal_vip:
description:
- An IP address identifying the internal OpenStack VIP.
required: true
external_vip:
description:
- An IP address identifying the external OpenStack VIP.
required: true
author:
- Nathan Pawelek (@npawelek)
"""
EXAMPLES = """
- name: Service discovery
service_discovery:
raxdc: False
internal_vip: 172.29.236.100
external_vip: 172.99.120.153
"""
class ServiceDiscovery(object):
    def __init__(self, module):
        """Store module params and open an OpenStack SDK connection.

        :param module: AnsibleModule instance supplying the raxdc,
            internal_vip and external_vip parameters.
        """
        self.module = module
        self.raxdc = module.params.get('raxdc')
        self.internal_vip = module.params.get('internal_vip')
        self.external_vip = module.params.get('external_vip')
        # Live SDK connection (built from /root/.config/openstack/clouds.yaml).
        self.conn = self.build_sdk_connection()
        # Parsed catalog facts keyed "<service>_<interface>_<item>".
        self.catalog_details = dict()
        # SSL certificate expiry tracking for discovered endpoints.
        self.cert_expiry = False
        # NOTE(review): list(dict()) is simply [] -- presumably a list that
        # will hold dicts; confirm intent.
        self.cert_expiry_list = list(dict())
        # Whether private network monitoring (PNM) pollers are required.
        self.pnm = False
        self.api_endpoints = dict()
        self.maas_external_hostname = ''
        self.maas_external_ip_address = ''
        self.use_public = False
        # Cinder backends split into local vs shared storage.
        self.cinder_backends = {
            "local": list(),
            "shared": list()
        }
def build_sdk_connection(self):
"""
Create a universal connection to OpenStack with the OpenStack SDK.
It will use the defined configuration from
/root/.config/openstack/clouds.yaml.
"""
try:
sdk_conn = connect(cloud='default', verify=False)
except MissingRequiredOptions as e:
message = ('Missing option in clouds.yaml: %s' % str(e))
self.module.fail_json(msg=message)
else:
return sdk_conn
def parse_service_catalog(self):
"""
Parse the OpenStack service catalog and identify service components.
"""
try:
catalog = self.conn.service_catalog
items = ['protocol', 'port', 'address']
invalid_chars = '-_'
for service in catalog:
# NOTE(npawelek): Sanitize the service catalog name to remove
# dashes and underscores
service_name = service['name']
for c in invalid_chars:
service_name = service_name.replace(c, '')
for endpoint in service['endpoints']:
url = urlparse(endpoint.get('url'))
for item in items:
key_name = "%s_%s_%s" % (
service_name,
endpoint['interface'],
item)
if item == 'protocol':
self.catalog_details[key_name] = str(url.scheme)
elif item == 'port':
self.parse_port(key_name, url)
else:
self.catalog_details[key_name] = str(
url.netloc.split(':')[0])
except Exception as e:
message = ('Issue parsing the service catalog. The following'
'error was received: %s' % str(e))
self.module.fail_json(msg=message)
def parse_port(self, key, url):
"""
Identify endpoint port. If a port is not detected in the url, this
attempts to use the protocol to associate a valid port.
"""
if not url.port:
if url.scheme == 'https':
self.catalog_details[key] = 443
elif url.scheme == 'http':
self.catalog_details[key] = 80
else:
raise Exception('Endpoint object has an unexpected port and'
'scheme: %s' % url)
else:
self.catalog_details[key] = url.port
    def generate_facts(self):
        """
        Gather information based on data center (raxdc or cdc) and endpoint
        validation. This results in whether PNM should be enabled, a list
        of certificates to validate expiry on, and the associated endpoint
        targets for private or public pollers.
        It's assumed that RDC deployments are accessible from public Rackspace
        Monitoring pollers, so we only check the public interface first. If
        PNM is required, then iterate over internal interfaces.
        """
        if self.raxdc:
            self.validate_endpoints(['public'])
            # The public pass flagged private addressing, so gather the
            # internal interfaces too.
            if self.pnm:
                self.validate_endpoints(['internal'])
        else:
            self.validate_endpoints(['public', 'internal'])
            # No CDC private endpoints found. Must validate public endpoints.
            if len(self.api_endpoints) == 0:
                self.use_public = True
                self.validate_endpoints(['public', 'internal'],
                                        use_public=self.use_public)
        # Set generic fallback vip depending on PNM
        if self.pnm and self.use_public is False:
            # NOTE: stripping dots then isdigit() is a crude test for an IPv4
            # literal (vs. a hostname that needs DNS resolution).
            if self.internal_vip.replace('.', '').isdigit():
                self.maas_external_hostname = self.internal_vip
                self.maas_external_ip_address = self.internal_vip
            else:
                vip_ip = self.get_url_ip_address(self.internal_vip)
                self.maas_external_hostname = self.internal_vip
                self.maas_external_ip_address = vip_ip
        else:
            if self.external_vip.replace('.', '').isdigit():
                self.maas_external_hostname = self.external_vip
                self.maas_external_ip_address = self.external_vip
            else:
                vip_ip = self.get_url_ip_address(self.external_vip)
                self.maas_external_hostname = self.external_vip
                self.maas_external_ip_address = vip_ip
def validate_endpoints(self, interface_list, use_public=False):
    """
    Determine whether the endpoint is natively usable or requires
    additional overrides for the target URL and IP. This will run against
    both presented interfaces to detect values usable for both
    lb_api_checks and private_lb_api_checks.

    :param interface_list: interface names to inspect ('public' and/or
        'internal'); matched as ``_<interface>_address`` substrings of
        ``self.catalog_details`` keys.
    :param use_public: when True (CDC fallback path), endpoints with
        public addresses are accepted anyway and PNM is forced on.
    """
    for interface in interface_list:
        # Only use address keys from catalog (netloc)
        string = "_%s_address" % interface
        # Walk the service catalog
        for key, value in self.catalog_details.items():
            if string in key:
                # Detect IP address or hostname: a dotted-digit value is
                # treated as an IPv4 literal and skips DNS resolution
                # and the certificate-expiry bookkeeping.
                if value.replace('.', '').isdigit():
                    is_private = self.validate_private_ip(value)
                    # Public IP in a CDC: unusable from private pollers,
                    # so skip unless the public fallback is requested.
                    if is_private is False and self.raxdc is False:
                        if use_public is True:
                            self.pnm = True
                            self.service_specific_overrides(key, value)
                        else:
                            pass
                    # Private IP: requires PNM.
                    elif is_private:
                        self.pnm = True
                        self.service_specific_overrides(key, value)
                    # Public IP in an RDC: usable as-is.
                    else:
                        self.service_specific_overrides(key, value)
                else:
                    # Ensure the hostname is resolvable by the system
                    url_ip = self.get_url_ip_address(value)
                    # Validation for SSL cert expiry
                    self.cert_expiry_check(key, value, url_ip)
                    # Determine if the URL is private
                    is_private = self.validate_private_ip(url_ip)
                    # CDC environments should always have PNM enabled.
                    # Skip if public endpoints have public addresses
                    # which likely aren't accessible from PNM poller
                    if is_private is False and self.raxdc is False:
                        if use_public is True:
                            self.pnm = True
                            self.service_specific_overrides(key,
                                                            value,
                                                            url_ip)
                        else:
                            pass
                    # Enable PNM and configure associated api facts
                    elif is_private:
                        self.pnm = True
                        self.service_specific_overrides(key, value, url_ip)
                    # Configure api facts for public pollers
                    else:
                        self.service_specific_overrides(key, value, url_ip)
def get_url_ip_address(self, url):
    """
    Resolve *url* to an IPv4 address via the system resolver.

    Fails the Ansible module (and therefore does not return) when the
    name cannot be resolved.
    """
    try:
        return gethostbyname(url)
    except gaierror as exc:
        # Resolution failure: tell the operator how to fix it.
        self.module.fail_json(
            msg=('%s does not appear to be resolvable '
                 'by DNS. Ensure the address is '
                 'resolvable or add an entry to '
                 '/etc/hosts on all controller nodes. '
                 'PNM Exception: %s') % (url, str(exc)))
    except Exception as exc:
        # Any other unexpected failure also aborts the module run.
        self.module.fail_json(
            msg='Failed to get URL ip address for %s. Error: %s' % (
                url, str(exc)))
def cert_expiry_check(self, key, url, url_ip):
    """
    Enable certificate-expiry monitoring for https endpoints and keep a
    unique list of certificate URLs to validate.
    """
    parts = key.split('_')
    endpoint = "%s_%s" % (parts[0], parts[1])
    # Only https endpoints carry a certificate worth checking.
    if self.catalog_details["%s_protocol" % endpoint] != 'https':
        return
    self.cert_expiry = True
    # Skip hosts already queued (the port is deliberately ignored when
    # checking for duplicates).
    already_listed = any(
        url in entry.get('cert_url', '')
        for entry in self.cert_expiry_list
    )
    if not already_listed:
        cert_url = "https://%s:%s/" % (
            url, self.catalog_details["%s_port" % endpoint])
        self.cert_expiry_list.append({
            "cert_url": cert_url,
            "cert_ip": url_ip,
        })
def validate_private_ip(self, address):
    """
    Report whether *address* is an RFC 1918 private (non-routable) IP.

    Returns True for a private address, False for any other valid IP,
    and fails the Ansible module when the value does not parse as an IP
    address at all.
    """
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError as exc:
        self.module.fail_json(
            msg=('%s is not a proper IP address according to '
                 'netaddr. PNM Exception: %s') % (address, str(exc)))
    except Exception as exc:
        self.module.fail_json(
            msg='Unable to validate IP %s. Error: %s' % (address, str(exc)))
    else:
        # netaddr's is_private() already yields a boolean; bool() just
        # makes the contract explicit.
        return bool(parsed.is_private())
def service_specific_overrides(self, key, value, url_ip=None):
    """
    Record the ``<service>_<interface>_url`` and ``<service>_<interface>_ip``
    facts for a single catalog entry.

    :param key: catalog key, e.g. ``nova_public_address``.
    :param value: the address (hostname or IPv4 literal) from the catalog.
    :param url_ip: resolved IP for *value*; when None the raw value is
        stored as the IP fact as well.
    """
    parts = key.split('_')
    endpoint = "%s_%s" % (parts[0], parts[1])
    # Full proto://host:port/ address for the endpoint.
    self.set_full_address("%s_url" % endpoint, value)
    # IP fact: fall back to the raw value when no resolved IP was given.
    self.api_endpoints["%s_ip" % endpoint] = value if url_ip is None else url_ip
def set_full_address(self, key, address):
    """
    Store the complete endpoint address (``proto://address:port/``) in
    ``self.api_endpoints`` under *key*.

    The protocol and port are looked up from ``self.catalog_details``
    using the ``<service>_<interface>`` prefix of *key*.
    """
    parts = key.split('_')
    endpoint = "%s_%s" % (parts[0], parts[1])
    protocol = self.catalog_details["%s_protocol" % endpoint]
    port = self.catalog_details["%s_port" % endpoint]
    self.api_endpoints[key] = "%s://%s:%s/" % (protocol, address, port)
def get_cinder_backends(self):
"""Discovers hosts for local and/or shared block storage backend pools.
Queries the OpenStack Block Storage API to identify all backend
pools. Using the volume backend name (everything after #), hosts
are split into local and/or shared volume backends. This will
provide dynamic cinder-volume hosts for any nomenclature.
Returns:
A dict mapping of volume hosts within local and/or shared backends.
For example:
{
'local': [
'infra02@midtier',
'infra01@midtier',
'infra03@midtier',
'infra03@ceph',
'infra01@ceph',
'infra02@ceph'
],
'shared': []
}
"""
cinder = self.conn.block_storage
backend_pools = [str(bp.name) for bp in cinder.backend_pools()]
backend_names = [i.split('#')[-1] for i in backend_pools]
unique_backend_names = set(backend_names)
unique_backend_counts = dict()
for name in unique_backend_names:
unique_backend_counts[name] = backend_names.count(name)
for pool in backend_pools:
host, backend_name = pool.split('#')
if unique_backend_counts[backend_name] == 1:
self.cinder_backends['shared'].append(host)
else:
self.cinder_backends['local'].append(host)
def main():
    """Ansible module entry point.

    Discovers service endpoints from the service catalog, derives the
    monitoring facts, optionally discovers cinder backend pools, and
    returns everything via ``exit_json`` without changing any state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            raxdc=dict(required=True, type='bool'),
            internal_vip=dict(required=True),
            external_vip=dict(required=True),
            cinder_discovery=dict(required=True, type='bool')
        ),
        supports_check_mode=False
    )
    discovery = ServiceDiscovery(module)
    discovery.parse_service_catalog()
    discovery.generate_facts()
    # Cinder pool discovery talks to the Block Storage API, so it only
    # runs when explicitly requested.
    if module.params.get('cinder_discovery') is True:
        discovery.get_cinder_backends()
    facts = {
        'cert_expiry': discovery.cert_expiry,
        'cert_expiry_list': discovery.cert_expiry_list,
        'pnm': discovery.pnm,
        'api_endpoints': discovery.api_endpoints,
        'maas_external_hostname': discovery.maas_external_hostname,
        'maas_external_ip_address': discovery.maas_external_ip_address,
        'maas_cinder_local_backends': discovery.cinder_backends['local'],
        'maas_cinder_shared_backends': discovery.cinder_backends['shared']
    }
    module.exit_json(changed=False, ansible_facts=facts)


if __name__ == '__main__':
    main()
| |
from decimal import *
from functions import spinMatchFinder, levelExtract, NUCIDgen, Correct_Uncertainty
from uncertainty import multuncert
from RecordClasses import *
class data:
    """Main data class: one isotope's level/decay data parsed from ENSDF.

    Depending on *option*, parses either just the Adopted Gammas data set
    (options one/three) or, for option 'two', the Adopted Gammas data set
    followed by the beta-decay data set, building LevelRecord/DecayRecord
    objects in ``self.data``.
    """

    def __init__(self,ENSDF,ISOvar,option,betaVar,energyLimit = 999999999):
        ###maybe get rid of option, find out how energy limit and max spin are used
        ##Initialize Parameters
        self.data = []          # parsed LevelRecord / DecayRecord objects
        self.name = ISOvar      # isotope label as given by the caller
        self.op = option        # parsing mode ('two' == decay-data mode)
        self.decay = betaVar    # decay type: "B+" or "B-"
        ## nucID is what is compared to the first 6 characters of the line to find the correct data
        nucID=self.name.upper()
        if self.op == 'two':#FIXME Decay Data setup
            parent = ''
            daughter = ''
            Avalue = ''
            ## assign Avalue (mass number) and parent (element symbol) values
            for char in nucID:
                if char.isnumeric():
                    Avalue = Avalue + char
                elif char.isalpha():
                    parent = parent + char
            # NOTE(review): this file handle is never closed explicitly.
            perTable = open("ElementList.txt","r")
            periodicTable = perTable.readline()
            periodicTable = periodicTable.split(',')
            periodicTable[-1] = periodicTable[-1][:-1]
            ## Assign daughter nucleus: B+ decay moves one element down the
            ## table (Z-1), B- decay one element up (Z+1).
            for item in periodicTable:
                if item.upper() == parent:
                    index = periodicTable.index(item)
                    if self.decay == "B+":
                        daughter = periodicTable[index-1].upper()
                        decayLabel = 'E'
                    elif self.decay == "B-":
                        daughter = periodicTable[index+1].upper()
                        decayLabel = 'B'
            parent = Avalue+parent
            daughter = NUCIDgen(Avalue+daughter)
            #print('Parent: '+ parent)
            #print('Daughter: '+daughter)
            nucID = daughter
        else:
            nucID = NUCIDgen(nucID)
        ##open the appropriate ensdf file
        # NOTE(review): the 'U' open mode was removed in Python 3.11 --
        # confirm the supported runtime, or this raises ValueError.
        self.f = open("Data/"+str(ENSDF),'rU')
        linecount = 0 ## printing linecount can help locate problem-causing lines in the ensdf file
        desiredData = False     # True while inside a data set we care about
        adoptedGammas = True    # True until the Adopted Gammas set ends
        needDecayRec = False    # a daughter level rec awaits its decay rec
        need_ss_info = False    # NOTE(review): set but never used below
        for line in self.f:
            linecount+=1
            ## The line parsing algorithm is derived from the labeling system of the ensdf files
            ## See the endsf manual, pg. 22, for more information about how the lines of data are organized
            ## Check if line is an End Record (end of a data set)
            if (desiredData and line[0:6].strip() == ''):
                if option == 'two':
                    desiredData = False
                    if adoptedGammas: ## End of Adopted Gammas Dataset
                        adoptedGammas = False
                        ## adoptedLevelRec is used when reading the Decay Data Set
                        adoptedLevelRec = self.data[:]
                        self.data = []
                    continue
                else: ## Option 1 & 3
                    break
            ## dsid is used in identifying decay data
            dsid = line[9:39].split(' ')
            ## Locate the level records in the ADOPTED GAMMAS Dataset
            ## i.e. identifies which lines in the data file have relevant data
            if (adoptedGammas and line[6:8]==' L' and line[0:6]==nucID):
                #print(linecount,line[:-1])
                ## set desiredData bool so the program wil exit after reading adopted data
                desiredData = True
                ##[name,energy,jpi,uncert,hlife,dhlife] <- output of levelExtract
                extractedData = levelExtract(line, self.data)
                ## The try/except checks if levelExtract is returning a continue code
                try:
                    ## assign data to LevelRecord class object
                    recordData = LevelRecord(*extractedData)
                except TypeError:
                    ## levelExtract passes error codes for continue
                    if extractedData == [-1]:
                        continue
                    else:
                        print('*** Improper initialization of LevelRecord object\n')
                        ##Deliberatly crash the program so that bugs can be found
                        float('crash')
                if(float(recordData.energy)<=energyLimit):
                    ## include the data
                    self.data.append(recordData)
                else:
                    if option == 'two':
                        continue
                    else:
                        break
                ## If no ground state energy is given, move on to the next isotope
                if recordData.jpi =='X':
                    print ('Missing ground state energy data for '+nucID)
                    break
            ## Get Decay Data ##
            if self.op == "two" and not adoptedGammas:
                ## Locate identification record for the decay dataset
                if (line[0:6] == daughter and line[6:9] == ' ' and dsid[0] == parent):
                    desiredData = True
                if desiredData == True:
                    ## Locate Parent Record and retrieve data
                    if (line[0:6] == NUCIDgen(parent) and line[6:8]==' P'):
                        extractedData = levelExtract(line, self.data)
                        ## The try/except checks if levelExtract is returning a continue code
                        try:
                            ## assign data to LevelRecord class object
                            recordData = LevelRecord(*extractedData)
                        except TypeError:
                            ## levelExtract passes error codes for continue
                            if extractedData == [-1]:
                                continue
                            else:
                                print('*** Improper initialization of LevelRecord object\n')
                                ##Deliberatly crash the program so that bugs can be found
                                float('crash')
                        if(float(recordData.energy)<=energyLimit):
                            self.data.append(recordData)
                    ## Locate the Normalization record for scaling branching ratios
                    if (line[0:6] == daughter and line[6:8] == ' N'):
                        ## See ensdf manual for more information on what these terms are (pg. 18)
                        BR = line[31:39].strip()
                        dBR = line[39:41].strip()
                        NB = line[41:49].strip()
                        dNB = line[49:55].strip()
                        if BR == '':
                            BR = '1'
                        if NB == '':
                            NB = '1'
                        #if dBR == '':
                        #    dBR = '0'
                        #else:
                        #    dBR = Correct_Uncertainty(BR,dBR)
                        ## Non-numeric uncertainty codes (e.g. 'AP') are
                        ## carried through unchanged.
                        if any(char.isalpha() for char in dNB):
                            d_scale_factor = dNB
                        elif dNB == '':
                            dNB = '0'
                            d_scale_factor = multuncert(float(NB),float(BR),float(dNB),float(dNB))
                        else:
                            dNB = Correct_Uncertainty(NB,dNB)
                            d_scale_factor = multuncert(float(NB),float(BR),float(dNB),float(dNB))
                        scale_factor = float(NB)*float(BR)
                    ## Locate the PN record to use instead of N record scaling
                    ## the N rec is only used if PN rec is empty
                    if (line[0:6] == daughter and line[6:8] == 'PN'):
                        NBBR = line[41:49].strip()
                        dNBBR = line[49:55].strip()
                        if not NBBR == '':
                            if dNBBR == '':
                                ## Use d_scale_factor from N record
                                pass
                            else:
                                # NOTE(review): ScaleUncert is not imported in
                                # this module -- confirm it exists; as written
                                # this branch looks like a latent NameError.
                                d_scale_factor = ScaleUncert(NBBR,dNBBR)
                            scale_factor = float(NBBR)
                        ## Else: use scale_factor from N record
                    ## Locate daughter Level Record
                    elif (line[0:6] == daughter and line[6:8]==' L'):
                        ## Get data from adoptedLevelRec
                        extractedData = levelExtract(line, self.data)
                        ## The try/except checks if levelExtract is returning a continue code
                        try:
                            ## assign data to LevelRecord class object
                            recordData = LevelRecord(*extractedData)
                        except TypeError:
                            ## levelExtract passes error codes for continue
                            if extractedData == [-1]:
                                continue
                            else:
                                print('*** Improper initialization of LevelRecord object\n')
                                ##Deliberatly crash the program so that bugs can be found
                                float('crash')
                        if(float(recordData.energy)<=energyLimit):
                            dataMatch = False
                            errorList = []
                            ## Frequently in the Decay Data Sets, the level records for the daughter isomers lack half life data, so that state's level record from the Adopted Gammas Data Set is used instead (all of the Gamma Level records are constained in adoptedLevelRec).
                            ## Find matching Adopted Gamma record
                            for record in adoptedLevelRec:
                                if Decimal(record.energy) == Decimal(recordData.energy):
                                    ### Here I got rid of a proper string copy ([:]) and am now just copying LevelRecord classes. This MAY cause problems if a state is in the decay data set twice, where modifying one instance of the class also changes the other (manifesting in the duplicate state having twice as much ionization or mass energy. #FIXME confirm that this class copy works for cases such as 38K which has an excited isomer that B decays
                                    matchedRecord = record ##Necessary string copy
                                    self.data.append(matchedRecord)
                                    dataMatch = True
                                    break
                                ## Sometimes the energy of a state differs between the Decay Data Set and the Adopted Data Set. A percent error (of energy) calculation is used to determine the closest Adopted Data Set level record for a given Decay Data Set level record.
                                else:
                                    errorPercent = abs((Decimal(record.energy)-Decimal(recordData.energy))/Decimal(recordData.energy)*Decimal('100'))
                                    errorList.append(errorPercent)
                            if not dataMatch:
                                ## MAXERROR is the maximum percent error with which a Decay Data Set state can be matched to an Adopted Gammas state.
                                MAXERROR = 1
                                minIndex = errorList.index(min(errorList))
                                if errorList[minIndex] < MAXERROR:
                                    minRec = adoptedLevelRec[minIndex]
                                    closestRec = minRec ##Necessary string copy #FIXME remove this?
                                    self.data.append(closestRec)
                                    ## Inform the user that a state has been imperfectly matched
                                    #print(recordData[0]+' state at '+recordData[1]+' keV matched to adopted level at '+adoptedLevelRec[minIndex][1]+' keV w/ error of '+str(round(errorList[minIndex],4))+'%.')
                                ## Case where the nearest Adopted Data Set level record is not within MAXERROR percent of the Decay Data Set level record.
                                else:
                                    #print('No adopted record found for '+recordData[0]+' at '+recordData[1]+' keV with under '+str(MAXERROR)+'% error.')
                                    self.data.append(recordData)
                            if needDecayRec == True:
                                ##no Decay record for previous daughter Level rec
                                ## (wrap it with zeroed branching intensities)
                                decayRecData = DecayRecord(self.data[-2], '0','0', '0', '0', '0')
                                self.data[-2] = decayRecData
                            needDecayRec = True
                            errorList = []
                        else:
                            continue
                    ## Identify decay record
                    elif(needDecayRec and line[0:6] == daughter and line[6:8]==' '+decayLabel):
                        ## Variables named *I are branching intensities
                        betaI = line[21:29].strip() ##Beta decay branching intensity
                        dbetaI = line[29:31].strip()
                        ## give uncertainty correct magnitude
                        if any (char.isalpha() for char in dbetaI):
                            pass
                        elif dbetaI == '':
                            pass
                        elif ('.' in betaI):
                            dbetaI = str(float(Correct_Uncertainty(betaI,dbetaI))*scale_factor)
                        ecI = line[31:39].strip() ##Electron Capture branching intensity
                        decI = line[39:41].strip()
                        ## give uncertainty correct magnitude
                        if any (char.isalpha() for char in decI):
                            pass
                        elif decI == '':
                            pass
                        elif ('.' in ecI):
                            decI = str(float(Correct_Uncertainty(ecI,decI))*scale_factor)
                        ## get total branching intensity FIXME error prop
                        if ecI == '' and betaI == '':
                            totBranchI = ''
                        elif ecI == '':
                            totBranchI = str(float(betaI)*scale_factor)
                        elif betaI == '':
                            totBranchI = str(float(ecI)*scale_factor)
                        else:
                            totBranchI = str((float(betaI) + float(ecI))*scale_factor)
                        needDecayRec = False
                        ## Wrap the most recent level record with its decay data
                        decayRecData = DecayRecord(self.data[-1], betaI, dbetaI, ecI, decI, totBranchI)
                        self.data[-1] = decayRecData

    def export(self,fExtOption = '.dat',extraTitleText = ''):
        """Write every record's data string to Output/gnuPlot/<name>.

        :param fExtOption: file extension for the output file.
        :param extraTitleText: extra text appended to the base file name.
        """
        fileName=str(self.name)+extraTitleText+fExtOption##creates filename
        fileName="Output/" + "gnuPlot/"+fileName.replace('/','_')
        # NOTE(review): the file handle is never closed explicitly.
        datFile = open(fileName,'wb')##Creates a file with a valid file name.
        for i in range(len(self.data)):
            lineToWrite = self.data[i].make_data_string()
            datFile.write(str.encode(lineToWrite))

    ## Filters by desired spin states, if given
    def filterData(self,userInput,UI=False):
        """Keep only records whose spin/parity matches *userInput*.

        :param userInput: comma-separated spin strings; '' means no filter.
        :param UI: when True a user-facing warning would be printed
            (currently commented out).
        """
        nullRecord = LevelRecord('NULL',0.0,"--",0.0,0.0,[0.0])
        ## no spin input
        if (userInput == ''):
            #print(self.data)
            if (not self.data):
                if(UI):
                    pass
                    ## Prints a statement telling user than no file was found
                    #print("Warning:No data filtered/selected for "+ self.name +".")
                self.data=[nullRecord]##Enters a dummy entry to file with something.
        ## Filter by spin states
        else:
            if (self.data): ## If self.data has data in it
                newData = []
                groundSt = self.data[0]
                # Ground state (index 0) is treated specially below.
                for i in range(1,len(self.data)):
                    #print(self.name,self.data[i])
                    ## The spinMatchFinder will identify if the state is the desired spin
                    if any(spinMatchFinder(wantedString, self.data[i].jpi) for wantedString in userInput.split(',')):
                        newData.append(self.data[i])
                self.data=newData[:]##changes data to the new data.
                if (self.data):
                    self.data.insert(0,groundSt)
                else:
                    ## No excited state matched; keep the ground state only
                    ## if it matches the requested spins itself.
                    if any(spinMatchFinder(wantedString,groundSt.jpi)for wantedString in userInput.split(',')):
                        self.data.append(groundSt)
                    else:
                        self.data = [nullRecord]##Enters a dummy entry to file with something.
            else: ## If self.data is empty
                if(UI):
                    ## Prints a statement telling user than no file was found
                    pass
                    #print("Warning:No data filtered/selected for "+ self.name +".")#Prints a statement telling user than no file was found
                self.data=[nullRecord]##Enters a dummy entry to file with something.
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for the reportlab.platypus.paragraphs module.
"""
__version__=''' $Id$ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os, unittest
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, registerFont, registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable, DocAssert
from reportlab.lib.colors import Color
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, PageBreak, NextPageTemplate
from reportlab.platypus import tableofcontents
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.paragraph import Paragraph, _getFragWords, _splitWord
def myMainPageFrame(canvas, doc):
    """Standard page decoration: frame border plus page number."""
    canvas.saveState()
    # Border around the main frame area.
    canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
    # Page number in the bottom margin.
    canvas.setFont('Times-Roman', 12)
    canvas.drawString(10*cm, cm, str(canvas.getPageNumber()))
    canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
    """Document template offering three page layouts.

    'normal' (wide single frame), 'special' (narrow single frame) and
    'template2' (small undecorated frame); the first two are decorated
    by ``myMainPageFrame``.
    """
    _invalidInitArgs = ('pageTemplates',)

    def __init__(self, filename, **kw):
        wide_frame = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
        narrow_frame = Frame(2.5*cm, 2.5*cm, 310, 25*cm, id='F2')
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        self.addPageTemplates([
            PageTemplate('normal', [wide_frame], myMainPageFrame),
            PageTemplate('special', [narrow_frame], myMainPageFrame),
            PageTemplate('template2', [Frame(395, 108, 165, 645, id='second2')]),
        ])
class ParagraphCorners(unittest.TestCase):
    "some corner cases which should parse"

    def check(self,text,bt = getSampleStyleSheet()['BodyText']):
        """Assert that *text* parses into a Paragraph without error.

        NOTE: the default style is evaluated once at class-definition
        time; fine here because these tests never mutate it.
        """
        try:
            P = Paragraph(text,style=bt)
        # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit are not
        # swallowed and re-reported as parse failures.
        except Exception:
            raise AssertionError("'%s' should parse"%text)

    def test0(self):
        "Empty / whitespace-only <para> variants must all parse."
        self.check('<para />')
        self.check('<para/>')
        self.check('\t\t\t\n\n\n<para />')
        self.check('\t\t\t\n\n\n<para/>')
        self.check('<para\t\t\t\t/>')
        self.check('<para></para>')
        self.check('<para> </para>')
        self.check('\t\t\n\t\t\t <para> </para>')

    def test1(self):
        "This makes several special paragraphs."
        # Build story: each textTransform variant, plain and multi-frag.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        btN = ParagraphStyle('BodyTextTTNone',parent=bt,textTransform='none')
        btL = ParagraphStyle('BodyTextTTLower',parent=bt,textTransform='lowercase')
        btU = ParagraphStyle('BodyTextTTUpper',parent=bt,textTransform='uppercase')
        btC = ParagraphStyle('BodyTextTTCapitalize',parent=bt,textTransform='capitalize')
        story.append(Paragraph('''This should be ORDINARY text.''',style=bt))
        story.append(Paragraph('''This should be ORDINARY text.''',style=btN))
        story.append(Paragraph('''This should be LOWER text.''',style=btL))
        story.append(Paragraph('''This should be upper text.''',style=btU))
        story.append(Paragraph('''This should be cAPITALIZED text.''',style=btC))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=bt))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>ORDINARY</b> text.''',style=btN))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>LOWER</b> text.''',style=btL))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>upper</b> text.''',style=btU))
        story.append(Paragraph('''T<i>hi</i>s shoul<font color="red">d b</font>e <b>cAPITALIZED</b> text.''',style=btC))
        doc = MyDocTemplate(outputfile('test_platypus_specialparagraphs.pdf'))
        doc.multiBuild(story)

    def test2(self):
        '''CJK splitting in multi-frag case'''
        style = ParagraphStyle('test', wordWrap = 'CJK')
        p = Paragraph('bla <i>blub</i> '*130 , style)
        aW,aH=439.275590551,121.88976378
        w,h=p.wrap(aW,aH)
        S=p.split(aW,aH)
        assert len(S)==2, 'Multi frag CJK splitting failed'
        w0,h0=S[0].wrap(aW,aH)
        # BUG FIX: the failure message referenced undefined name ``H0``,
        # which raised NameError and masked the real assertion failure.
        assert h0<=aH,'Multi-frag CJK split[0] has wrong height %s >= available %s' % (h0,aH)
        w1,h1=S[1].wrap(aW,aH)
        assert h0+h1==h, 'Multi-frag-CJK split[0].height(%s)+split[1].height(%s) don\'t add to original %s' % (h0,h1,h)

    def test3(self):
        '''compare CJK splitting in some edge cases against pinned PDF ops'''
        from reportlab.pdfgen.canvas import Canvas
        from reportlab.platypus.paragraph import Paragraph
        from reportlab.lib.styles import ParagraphStyle
        from reportlab.pdfbase import pdfmetrics
        from reportlab.lib.enums import TA_LEFT
        sty = ParagraphStyle('A')
        sty.fontSize = 15
        sty.leading = sty.fontSize*1.2
        sty.fontName = 'Courier'
        sty.alignment = TA_LEFT
        sty.wordWrap = 'CJK'
        p0=Paragraph('ABCDEFGHIJKL]N',sty)
        p1=Paragraph('AB<font color="red">C</font>DEFGHIJKL]N',sty)
        canv = Canvas('test_platypus_paragraph_cjk3.pdf')
        # Remember where the code stream starts so only our ops are compared.
        ix = len(canv._code)
        aW = pdfmetrics.stringWidth('ABCD','Courier',15)
        w,h=p0.wrap(aW,1000000)
        y = canv._pagesize[1]-72-h
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        # Narrower than one glyph: forces one character per line.
        w,h=p0.wrap(aW*0.25-2,1000000)
        y -= h+10
        p0.drawOn(canv,72,y)
        w,h=p1.wrap(aW/4.-2,1000000)
        y -= h+10
        p1.drawOn(canv,72,y)
        assert canv._code[ix:]==['q', '1 0 0 1 72 697.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 57 Tm /F2 15 Tf 18 TL (ABCD) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 615.8898 cm', 'q', 'BT 1 0 0 1 0 57 Tm 18 TL /F2 15 Tf 0 0 0 rg (AB) Tj 1 0 0 rg (C) Tj 0 0 0 rg (D) Tj T* (EFGH) Tj T* (IJKL]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 353.8898 cm', 'q', '0 0 0 rg', 'BT 1 0 0 1 0 237 Tm /F2 15 Tf 18 TL (A) Tj T* (B) Tj T* (C) Tj T* (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q', 'q', '1 0 0 1 72 91.88976 cm', 'q', 'BT 1 0 0 1 0 237 Tm 18 TL /F2 15 Tf 0 0 0 rg (A) Tj T* (B) Tj T* 1 0 0 rg (C) Tj T* 0 0 0 rg (D) Tj T* (E) Tj T* (F) Tj T* (G) Tj T* (H) Tj T* (I) Tj T* (J) Tj T* (K) Tj T* (L) Tj T* (]) Tj T* (N) Tj T* ET', 'Q', 'Q']
        canv.showPage()
        canv.save()
class ParagraphSplitTestCase(unittest.TestCase):
    "Test multi-page splitting of paragraphs (eyeball-test)."

    def test0(self):
        "This makes one long multi-page paragraph."
        # Build story.
        story = []
        styleSheet = getSampleStyleSheet()
        bt = styleSheet['BodyText']
        # Filler prose; the exact wording is irrelevant to the layout.
        text = '''If you imagine that the box of X's tothe left is
an image, what I want to be able to do is flow a
series of paragraphs around the image
so that once the bottom of the image is reached, then text will flow back to the
left margin. I know that it would be possible to something like this
using tables, but I can't see how to have a generic solution.
There are two examples of this in the demonstration section of the reportlab
site.
If you look at the "minimal" euro python conference brochure, at the end of the
timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
see how the AdSu one might be done generically, but the O'Reilly, unsure...
I guess I'm hoping that I've missed something, and that
it's actually easy to do using platypus.
'''
        from reportlab.platypus.flowables import ParagraphAndImage, Image
        from reportlab.lib.testutils import testsFolder
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        story.append(ParagraphAndImage(Paragraph(text,bt),Image(gif)))
        phrase = 'This should be a paragraph spanning at least three pages. '
        description = ''.join([('%d: '%i)+phrase for i in range(250)])
        # Same flowable with the image on the left; long enough to split
        # over several pages.
        story.append(ParagraphAndImage(Paragraph(description, bt),Image(gif),side='left'))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphandimage.pdf'))
        doc.multiBuild(story)

    def test1(self):
        "This makes one long multi-page paragraph."
        # Build story.
        story = []
        styleSheet = getSampleStyleSheet()
        h3 = styleSheet['Heading3']
        bt = styleSheet['BodyText']
        # Exercises greek tags, unichar/entity escapes and link markup.
        text = '''If you imagine that the box of X's tothe left is
an image, what I want to be able to do is flow a
series of paragraphs around the image
so that once the bottom of the image is reached, then text will flow back to the
left margin. I know that it would be possible to something like this
using tables, but I can't see how to have a generic solution.
There are two examples of this in the demonstration section of the reportlab
site.
If you look at the "minimal" euro python conference brochure, at the end of the
timetable section (page 8), there are adverts for "AdSu" and "O'Reilly". I can
see how the AdSu one might be done generically, but the O'Reilly, unsure...
I guess I'm hoping that I've missed something, and that
it's actually easy to do using platypus.We can do greek letters <greek>mDngG</greek>. This should be a
u with a dieresis on top <unichar code=0xfc/>="<unichar code="0xfc"/>" and this &#xfc;="ü" and this \\xc3\\xbc="\xc3\xbc". On the other hand this
should be a pound sign &pound;="£" and this an alpha &alpha;="α". You can have links in the page <link href="http://www.reportlab.com" color="blue">ReportLab</link> & <a href="http://www.reportlab.org" color="green">ReportLab.org</a>.
Use scheme "pdf:" to indicate an external PDF link, "http:", "https:" to indicate an external link eg something to open in
your browser. If an internal link begins with something that looks like a scheme, precede with "document:". Empty hrefs should be allowed ie <a href=""><a href="">test</a></a> should be allowed. <strike>This text should have a strike through it.</strike>
'''
        from reportlab.platypus.flowables import ImageAndFlowables, Image
        from reportlab.lib.testutils import testsFolder
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        heading = Paragraph('This is a heading',h3)
        story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(text,bt)]))
        phrase = 'This should be a paragraph spanning at least three pages. '
        description = ''.join([('%d: '%i)+phrase for i in range(250)])
        story.append(ImageAndFlowables(Image(gif),[heading,Paragraph(description, bt)],imageSide='left'))
        story.append(NextPageTemplate('special'))
        story.append(PageBreak())
        # Register the Vera TrueType family so <font name="Vera"> plus
        # <b>/<i> markup resolves to the bold/italic variants.
        VERA = ('Vera','VeraBd','VeraIt','VeraBI')
        for v in VERA:
            registerFont(TTFont(v,v+'.ttf'))
        registerFontFamily(*(VERA[:1]+VERA))
        story.append(ImageAndFlowables(
            Image(gif,width=280,height=120),
            Paragraph('''<font name="Vera">The <b>concept</b> of an <i>integrated</i> one <b><i>box</i></b> solution for <i><b>advanced</b></i> voice and
data applications began with the introduction of the IMACS. The
IMACS 200 carries on that tradition with an integrated solution
optimized for smaller port size applications that the IMACS could not
economically address. An array of the most popular interfaces and
features from the IMACS has been bundled into a small 2U chassis
providing the ultimate in ease of installation.</font>''',
                style=ParagraphStyle(
                    name="base",
                    fontName="Helvetica",
                    leading=12,
                    leftIndent=0,
                    firstLineIndent=0,
                    spaceBefore = 9.5,
                    fontSize=9.5,
                )
            ),
            imageSide='left',
            )
        )
        story.append(ImageAndFlowables(
            Image(gif,width=240,height=120),
            Paragraph('''The concept of an integrated one box solution for advanced voice and
data applications began with the introduction of the IMACS. The
IMACS 200 carries on that tradition with an integrated solution
optimized for smaller port size applications that the IMACS could not
economically address. An array of the most popular interfaces and
features from the IMACS has been bundled into a small 2U chassis
providing the ultimate in ease of installation.''',
                style=ParagraphStyle(
                    name="base",
                    fontName="Helvetica",
                    leading=12,
                    leftIndent=0,
                    firstLineIndent=0,
                    spaceBefore = 9.5,
                    fontSize=9.5,
                )
            ),
            imageSide='left',
            )
        )
        story.append(PageBreak())
        story.append(Paragraph('Image larger than the frame',h3))
        story.append(ImageAndFlowables(
            Image(gif,width=6*110,height=6*44),
            Paragraph('''The concept of an integrated one box solution for advanced voice and
data applications began with the introduction of the IMACS. The
IMACS 200 carries on that tradition with an integrated solution
optimized for smaller port size applications that the IMACS could not
economically address. An array of the most popular interfaces and
features from the IMACS has been bundled into a small 2U chassis
providing the ultimate in ease of installation.''',
                style=ParagraphStyle(
                    name="base",
                    fontName="Helvetica",
                    leading=12,
                    leftIndent=0,
                    firstLineIndent=0,
                    spaceBefore = 9.5,
                    fontSize=9.5,
                )
            ),
            imageSide='left',
            )
        )
        text = '''With this clarification, an important property of these three types of
EC can be defined in such a way as to impose problems of phonemic and
morphological analysis. Another superficial similarity is the interest
in simulation of behavior, this analysis of a formative as a pair of
sets of features does not readily tolerate a stipulation to place the
constructions into these various categories. We will bring evidence in
favor of the following thesis: the earlier discussion of deviance is
not to be considered in determining the extended c-command discussed in
connection with (34). Another superficial similarity is the interest in
simulation of behavior, relational information may remedy and, at the
same time, eliminate a descriptive fact. There is also a different
approach to the [unification] problem, the descriptive power of the base
component delimits the traditional practice of grammarians.'''
        gif = os.path.join(testsFolder,'pythonpowered.gif')
        heading = Paragraph('This is a heading',h3)
        story.append(NextPageTemplate('template2'))
        story.append(PageBreak())
        story.append(heading)
        story.append(ImageAndFlowables(Image(gif,width=66,height=81),[Paragraph(text,bt)],imageSide='left',imageRightPadding=10))
        doc = MyDocTemplate(outputfile('test_platypus_imageandflowables.pdf'),showBoundary=1)
        doc.multiBuild(story)
class TwoFrameDocTemplate(BaseDocTemplate):
    "Define a simple document with two frames per page."

    def __init__(self, filename, **kw):
        from reportlab.lib import pagesizes
        margin = 2*cm
        PAGESIZE = pagesizes.landscape(pagesizes.A4)
        # Two equal columns inside the margins; the usable height is
        # deliberately reduced to force paragraph splitting.
        col_width = (PAGESIZE[0] - 2*margin) / 2.
        col_height = (PAGESIZE[1] - 2*margin) - 14*cm
        frame_kwargs = dict(
            leftPadding=0, topPadding=0, rightPadding=0, bottomPadding=0,
            showBoundary=True,
        )
        left = Frame(margin, margin + 0.5*cm,
                     col_width - 0.75*cm, col_height - 1*cm,
                     id='F1', **frame_kwargs)
        right = Frame(col_width + 2.7*cm, margin + 0.5*cm,
                      col_width - 0.75*cm, col_height - 1*cm,
                      id='F2', **frame_kwargs)
        BaseDocTemplate.__init__(self, filename, **kw)
        self.addPageTemplates(PageTemplate('template', [left, right]))
class SplitFrameParagraphTest(unittest.TestCase):
    "Test paragraph split over two frames."

    def test(self):
        # Style the sample body text as justified 12pt Helvetica.
        body = getSampleStyleSheet()['BodyText']
        body.fontName = "Helvetica"
        body.fontSize = 12
        body.leading = 16
        body.alignment = TA_JUSTIFY
        text = "Bedauerlicherweise ist ein Donaudampfschiffkapit\xc3\xa4n auch <font color='red'>nur</font> <font color='green'>ein</font> Dampfschiffkapit\xc3\xa4n."
        tagFormat = '%s'
        # strange behaviour when using next code line
        # (same for '<a href="http://www.reportlab.org">%s</a>'
        tagFormat = '<font color="red">%s</font>'
        #text = " ".join([tagFormat % w for w in text.split()])
        # Repeat the sentence so the paragraph is long enough to split.
        story = [Paragraph((text + " ") * 3, style=body)]
        from reportlab.lib import pagesizes
        doc = TwoFrameDocTemplate(outputfile('test_paragraphs_splitframe.pdf'),
                                  pagesize=pagesizes.landscape(pagesizes.A4))
        doc.build(story)
class FragmentTestCase(unittest.TestCase):
    "Test fragmentation of paragraphs."

    def test0(self):
        "Test empty paragraph."
        style = getSampleStyleSheet()['BodyText']
        para = Paragraph('', style)
        assert [frag.text for frag in para.frags] == []

    def test1(self):
        "Test simple paragraph."
        style = getSampleStyleSheet()['BodyText']
        para = Paragraph("X<font name=Courier>Y</font>Z", style)
        assert [frag.text for frag in para.frags] == ['X', 'Y', 'Z']

    def test2(self):
        '''test _splitWord'''
        # Both the unicode and the UTF-8 bytes spelling must split identically.
        expected = [u"d'op\xe9", u'ratio', u'n']
        self.assertEqual(_splitWord(u'd\'op\u00e9ration', 30, [30], 0, 'Helvetica', 12), expected)
        self.assertEqual(_splitWord(b'd\'op\xc3\xa9ration', 30, [30], 0, 'Helvetica', 12), expected)
class ULTestCase(unittest.TestCase):
    "Test underlining and overstriking of paragraphs."
    def testUl(self):
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        from reportlab.platypus.flowables import AnchorFlowable
        class MyDocTemplate(BaseDocTemplate):
            # Single-frame template; frame boundary drawn in red for inspection.
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                kw['showBoundary']=1
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                        [
                        PageTemplate('normal',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                ),
                        ])
        # Build a family of styles covering each alignment plus two indent levels.
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_sp = ParagraphStyle(name='normal_sp',parent=normal,alignment=TA_JUSTIFY,spaceBefore=12)
        normal_just = ParagraphStyle(name='normal_just',parent=normal,alignment=TA_JUSTIFY)
        normal_right = ParagraphStyle(name='normal_right',parent=normal,alignment=TA_RIGHT)
        normal_center = ParagraphStyle(name='normal_center',parent=normal,alignment=TA_CENTER)
        normal_indent = ParagraphStyle(name='normal_indent',firstLineIndent=0.5*inch,parent=normal)
        normal_indent_lv_2 = ParagraphStyle(name='normal_indent_lv_2',firstLineIndent=1.0*inch,parent=normal)
        texts = ['''Furthermore, a subset of <font size="14">English sentences</font> interesting on quite
independent grounds is not quite equivalent to a stipulation to place
the constructions into these various categories.''',
                '''We will bring evidence in favor of
The following thesis: most of the methodological work in modern
linguistics can be defined in such a way as to impose problems of
phonemic and morphological analysis.''']
        story =[]
        a = story.append
        # Intra-document links; their targets are planted further down in the story.
        a(Paragraph("This should <a href=\"#theEnd\" color=\"blue\"><a href=\"#theEnd\" color=\"blue\">jump</a></a> jump to the end!",style=normal))
        a(XPreformatted("This should <a href=\"#theEnd\" color=\"blue\"><a href=\"#theEnd\" color=\"blue\">jump</a></a> jump to the end!",style=normal))
        a(Paragraph("<a href=\"#theEnd\"><u><font color=\"blue\">ditto</font></u></a>",style=normal))
        a(XPreformatted("<a href=\"#theEnd\"><u><font color=\"blue\">ditto</font></u></a>",style=normal))
        a(Paragraph("This <font color='CMYKColor(0,0.6,0.94,0)'>should</font> <a href=\"#thePenultimate\" color=\"blue\"><a href=\"#thePenultimate\" color=\"blue\">jump</a></a> jump to the penultimate page!",style=normal))
        a(Paragraph("This should <a href=\"#theThird\" color=\"blue\"><a href=\"#theThird\" color=\"blue\">jump</a></a> jump to a justified para!",style=normal))
        a(Paragraph("This should <a href=\"#theFourth\" color=\"blue\"><a href=\"#theFourth\" color=\"blue\">jump</a></a> jump to an indented para!",style=normal))
        # Cartesian product: plain/markup text x u/strike tag x 6 lengths x
        # 6 styles x 3 autoLeading modes; specific combinations plant the
        # anchor targets referenced above.
        for mode in (0,1):
            text0 = texts[0]
            text1 = texts[1]
            if mode:
                text0 = text0.replace('English sentences','<b>English sentences</b>').replace('quite equivalent','<i>quite equivalent</i>')
                text1 = text1.replace('the methodological work','<b>the methodological work</b>').replace('to impose problems','<i>to impose problems</i>')
            for t in ('u','strike'):
                for n in range(6):
                    for s in (normal,normal_center,normal_right,normal_just,normal_indent, normal_indent_lv_2):
                        for autoLeading in ('','min','max'):
                            if n==4 and s==normal_center and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The second jump at the beginning should come here <a name=\"thePenultimate\"/><a name=\"thePenultimate\"/>!</font>",style=normal))
                            elif n==4 and s==normal_just and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The third jump at the beginning should come just below here to a paragraph with just an a tag in it!</font>",style=normal))
                                a(Paragraph("<a name=\"theThird\"/>",style=normal))
                            elif n==4 and s==normal_indent and t=='strike' and mode==1:
                                a(Paragraph("<font color=green>The fourth jump at the beginning should come just below here!</font>",style=normal))
                                a(AnchorFlowable('theFourth'))
                            # Label paragraph followed by the decorated sample text.
                            a(Paragraph('n=%d style=%s(autoLeading=%s) tag=%s'%(n,s.name,autoLeading,t),style=normal_sp))
                            a(Paragraph('<para autoleading="%s">%s<%s>%s</%s>. %s <%s>%s</%s>. %s</para>' % (
                                autoLeading,
                                (s==normal_indent_lv_2 and '<seq id="document" inc="no"/>.<seq id="document_lv_2"/>' or ''),
                                t,' '.join((n+1)*['A']),t,text0,t,' '.join((n+1)*['A']),t,text1),
                                style=s))
        a(Paragraph("The jump at the beginning should come here <a name=\"theEnd\"/><a name=\"theEnd\"/>!",style=normal))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_ul.pdf'))
        doc.build(story)
class AutoLeadingTestCase(unittest.TestCase):
    # NOTE(review): original docstring said "underlining and overstriking"
    # (copy-paste from ULTestCase); this case exercises autoLeading,
    # inline images and valign.
    "Test autoLeading, inline images and image valign in paragraphs."
    def testAutoLeading(self):
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        from reportlab.platypus.flowables import AnchorFlowable
        class MyDocTemplate(BaseDocTemplate):
            # Single-frame template; frame boundary drawn in red for inspection.
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                kw['showBoundary']=1
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                        [
                        PageTemplate('normal',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                ),
                        ])
        from reportlab.lib.testutils import testsFolder
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_sp = ParagraphStyle(name='normal_sp',parent=normal,alignment=TA_JUSTIFY,spaceBefore=12)
        # Sample texts mixing oversized fonts and inline images with several valigns.
        texts = ['''Furthermore, a subset of <font size="14">English sentences</font> interesting on quite
independent grounds is not quite equivalent to a stipulation to place
<font color="blue">the constructions <img src="%(testsFolder)s/../docs/images/testimg.gif"/> into these various categories.</font>'''%dict(testsFolder=testsFolder),
                '''We will bring <font size="18">Ugly Things</font> in favor of
The following thesis: most of the methodological work in Modern
Linguistics can be <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="baseline" /> defined in such <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="10" /> a way as to impose problems of
phonemic and <u>morphological <img src="%(testsFolder)s/../docs/images/testimg.gif" valign="top"/> </u> analysis.'''%dict(testsFolder=testsFolder)]
        story =[]
        a = story.append
        t = 'u'
        n = 1
        # Both styles under each autoLeading mode ('' means off).
        for s in (normal,normal_sp):
            for autoLeading in ('','min','max'):
                a(Paragraph('style=%s(autoLeading=%s)'%(s.name,autoLeading),style=normal_sp))
                a(Paragraph('<para autoleading="%s"><%s>%s</%s>. %s <%s>%s</%s>. %s</para>' % (
                    autoLeading,
                    t,' '.join((n+1)*['A']),t,texts[0],t,' '.join((n+1)*['A']),t,texts[1]),
                    style=s))
        # Edge case: image is the very first thing on the line.
        a(Paragraph('''<img src="%(testsFolder)s/../docs/images/testimg.gif" valign="top"/> image is very first thing in the line.'''%dict(testsFolder=testsFolder), style=normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        a(Paragraph('<img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="0.19in" /> some text <br /> '%dict(testsFolder=testsFolder), normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        a(Paragraph('<img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="0.19in" /> <br /> '%dict(testsFolder=testsFolder), normal))
        a(Paragraph('some text.... some more.... some text.... some more....', normal))
        #Volker Haas' valign tests
        fmt = '''<font color="red">%(valign)s</font>: Furthermore, a <u>subset</u> <strike>of</strike> <font size="14">English sentences</font> interesting on quite
independent grounds is not quite equivalent to a stipulation to place <img src="%(testsFolder)s/../docs/images/redsquare.png" width="0.5in" height="0.5in" valign="%(valign)s"/>
the constructions into these <u>various</u> categories. We will bring <font size="18">Ugly Things</font> in favor of
The following thesis: most of the methodological work in Modern
Linguistics can be defined in such a way as to impose problems of
phonemic and <u>morphological</u> <strike>analysis</strike>.'''
        p_style= ParagraphStyle('Normal')
        p_style.autoLeading = 'max'
        # Every supported valign keyword plus percentage and absolute length.
        for valign in (
                'baseline',
                'sub',
                'super',
                'top',
                'text-top',
                'middle',
                'bottom',
                'text-bottom',
                '0%',
                '2in',
                ):
            a(Paragraph(fmt % dict(valign=valign,testsFolder=testsFolder),p_style))
            a(XPreformatted(fmt % dict(valign=valign,testsFolder=testsFolder),p_style))
        # Percentage width/height handling in Paragraph, XPreformatted and CJK mode.
        a(Paragraph('<br/><b>Some Paragraph tests of <img width="x%" height="x%"</b>...', normal))
        a(Paragraph('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('<br/><b>Some XPreformatted tests of <img width="x%" height="x%"</b>...', normal))
        a(XPreformatted('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normal))
        a(XPreformatted('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normal))
        a(Paragraph('<br/><b>Some CJK Paragraph tests of <img width="x%" height="x%"</b>...', normal))
        normalCJK = ParagraphStyle('normalCJK', parent=normal, wordWrap = 'CJK')
        a(Paragraph('H=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="10%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="50%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="0.57in" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% W=10%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="10%%" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=100%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="100%%" />'%dict(testsFolder=testsFolder), normalCJK))
        a(Paragraph('H=50%% W=50%% <img src="%(testsFolder)s/../docs/images/testimg.gif" width="50%%" height="50%%" />'%dict(testsFolder=testsFolder), normalCJK))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_autoleading.pdf'))
        doc.build(story)
class JustifyTestCase(unittest.TestCase):
    "Test justification of paragraphs."
    def testUl(self):
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        class MyDocTemplate(BaseDocTemplate):
            # Single-frame template; frame boundary drawn in red for inspection.
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                        [
                        PageTemplate('normal',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                ),
                        ])
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        normal_just = ParagraphStyle(name='normal_just',parent=normal,alignment=TA_JUSTIFY,spaceAfter=12)
        text0 = '''Furthermore, a subset of English sentences interesting on quite
independent grounds is not quite equivalent to a stipulation to place
the constructions into these various categories. We will bring evidence in favor of
The following thesis: most of the methodological work in modern
linguistics can be defined in such a way as to impose problems of
phonemic and morphological analysis.'''
        story =[]
        a = story.append
        # mode selects the variant: 0 plain, 1 bold/italic markup, 2 all-bold,
        # 3 all-italic, 4 hard spaces, 5-7 explicit <br/> with justifyBreaks /
        # justifyLastLine options.
        for mode in (0,1,2,3,4,5,6,7):
            text = text0
            paraStyle = normal_just
            if mode==1:
                text = text.replace('English sentences','<b>English sentences</b>').replace('quite equivalent','<i>quite equivalent</i>')
                text = text.replace('the methodological work','<b>the methodological work</b>').replace('to impose problems','<i>to impose problems</i>')
                a(Paragraph('Justified paragraph in normal/bold/italic font',style=normal))
            elif mode==2:
                text = '<b>%s</b>' % text
                a(Paragraph('Justified paragraph in bold font',style=normal))
            elif mode==3:
                text = '<i>%s</i>' % text
                a(Paragraph('Justified paragraph in italic font',style=normal))
            elif mode==4:
                # Replacements swap normal spaces for hard (non-breaking) spaces.
                text = text.replace('English ','English ').replace('quite ','quite ')
                text = text.replace(' methodological',' methodological').replace(' impose',' impose')
                a(Paragraph('Justified paragraph in normal font & some hard spaces',style=normal))
            elif mode in (5,6,7):
                text = text.replace('as to impose','<br/>as to impose').replace(' most of the','<br/>most of the')
                text = text.replace(' grounds','<br/>grounds').replace(' various','<br/>various')
                if mode in (6,7):
                    msg = []
                    msg.append('justifyBreaks=1')
                    paraStyle = paraStyle.clone('paraStyle6',paraStyle,justifyBreaks=1)
                    if mode==7:
                        msg.append('justifyLastLine=3')
                        paraStyle = paraStyle.clone('paraStyle7',paraStyle,justifyLastLine=3)
                    msg = '(%s) ' % (' '.join(msg))
                else:
                    a(PageBreak())
                    msg = ' '
                a(Paragraph('Justified%swith some <br/> tags' % msg,style=normal))
            else:
                a(Paragraph('Justified paragraph in normal font',style=normal))
            a(Paragraph(text,style=paraStyle))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_just.pdf'))
        doc.build(story)
    def testAutoPageTemplate(self):
        # Verifies autoNextPageTemplate: after the 'auto' template, subsequent
        # pages must switch to 'autoFollow' without an explicit NextPageTemplate.
        from reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, PageBegin
        from reportlab.lib.units import inch
        class onPage:
            # Page callback that stamps which template drew the page.
            def __init__(self,label):
                self.label = label
            def __call__(self,canv,doc):
                canv.drawString(72,72,'This is pageTemplate(%s)' % (self.label,))
        class MyDocTemplate(BaseDocTemplate):
            _invalidInitArgs = ('pageTemplates',)
            def __init__(self, filename, **kw):
                self.allowSplitting = 0
                BaseDocTemplate.__init__(self, filename, **kw)
                self.addPageTemplates(
                        [
                        PageTemplate('normal',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                onPage = onPage('normal'),
                                ),
                        PageTemplate('auto',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                onPage = onPage('auto'),
                                autoNextPageTemplate = 'autoFollow',
                                ),
                        PageTemplate('autoFollow',
                                [Frame(inch, inch, 6.27*inch, 9.69*inch, id='first',topPadding=0,rightPadding=0,leftPadding=0,bottomPadding=0,showBoundary=ShowBoundaryValue(color="red"))],
                                onPage = onPage('autoFollow'),
                                ),
                        ])
        styleSheet = getSampleStyleSheet()
        normal = ParagraphStyle(name='normal',fontName='Times-Roman',fontSize=12,leading=1.2*12,parent=styleSheet['Normal'])
        story =[]
        a = story.append
        a(Paragraph('should be on page template normal', normal))
        a(NextPageTemplate('auto'))
        a(PageBreak())
        a(Paragraph('should be on page template auto', normal))
        a(PageBreak())
        # DocAssert fails the build if the automatic switch did not happen.
        a(DocAssert('doc.pageTemplate.id=="autoFollow"','expected doc.pageTemplate.id=="autoFollow"'))
        a(Paragraph('should be on page template autoFollow 1', normal))
        a(PageBreak())
        a(Paragraph('should be on page template autoFollow 2', normal))
        doc = MyDocTemplate(outputfile('test_platypus_paragraphs_AutoNextPageTemplate.pdf'))
        doc.build(story)
#noruntests
def makeSuite():
    # Collect the paragraph test cases for the runner.
    # NOTE(review): ParagraphCorners and ParagraphSplitTestCase are defined
    # earlier in this file, outside this excerpt.
    return makeSuiteForClasses(ParagraphCorners,SplitFrameParagraphTest,FragmentTestCase, ParagraphSplitTestCase, ULTestCase, JustifyTestCase,
            AutoLeadingTestCase)
#noruntests
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| |
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
    """ Export class which writes parsed data to a certain format"""

    # Formats understood by the export methods. "xls"/"df" are accepted
    # aliases of "xlsx"/"dataframe" (see ExcitedStateSummary).
    valid_formats = ["pdf", "xlsx", "xls", "txt", "csv", "dataframe", "df"]

    def __init__(self, data=None):
        # Optional payload; the export methods below take their data
        # explicitly, so this is only kept for convenience.
        self.data = data
        # for later: add pandas independent functions to export arrays to file

    def arrays_to_dframe(self, **kwargs):
        """ Using keyworded arguments, expects arrays.

        Returns a pandas.DataFrame with one column per keyword; columns of
        unequal length are padded with NaN.
        """
        try:
            df = pd.DataFrame(kwargs)
        except ValueError:
            # Arrays do not have the same length: go through Series so
            # pandas pads the shorter columns with NaN.
            df = pd.DataFrame({key: pd.Series(value)
                               for key, value in kwargs.items()})
        return df

    def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
                            ground_state=False):
        """ Exports energy related excited state quantities to file

        Parameters
        ----------
        results : CCParser.ParseContainer
            Parsing container that holds parsed values.
        fname : string
            Filename prefix.
        fmt : string
            Output format ('csv', 'xlsx'/'xls' or 'dataframe'/'df' for
            pandas.DataFrame).
        ground_state : bool
            Whether to include an empty line in the table for the ground state.

        Returns
        -------
        pandas.DataFrame or None
            The assembled table for 'dataframe'/'df'; otherwise the table is
            written to file and None is returned.

        Raises
        ------
        ValueError
            If `fmt` is not supported or a non-converged state is present.
        """
        if fmt not in Exporter.valid_formats:
            raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
        if False in getattr(results, VarNames.has_converged).data:
            raise ValueError("Not converged state detected!")
        d = {}
        # (1) Excitation energies (mandatory minimum)
        d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
        n_states = len(d[VarNames.exc_energy_rel])
        # (2) Oscillator strengths (optional)
        if hasattr(results, VarNames.osc_str):
            d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
        # Base table indexed by (State, Row ID) so amplitude rows can align.
        tuples = [(state, 0) for state in range(1, n_states + 1)]
        df = pd.DataFrame(d)
        df.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
        # (3) Amplitudes (optional).
        # BUG FIX: previously the final DataFrame was only created inside this
        # branch, so exporting results without parsed amplitudes raised
        # NameError.
        if hasattr(results, VarNames.amplitudes):
            ampl = getattr(results, VarNames.amplitudes)
            pieces = [a.to_dataframe() for a in ampl]
            keys = list(range(1, len(pieces) + 1))
            amp_df = pd.concat(pieces, keys=keys, names=["State", "Row ID"])
            df = pd.concat([df, amp_df], axis=1)
        # add row to MultiIndex, see https://stackoverflow.com/q/24917700
        if ground_state:
            df.loc[(0, 0), :] = np.nan
            df.sort_index(level=0, inplace=True)
        # EXPORT TO FILE or dataframe
        fout = fname + "." + fmt
        if fmt == "csv":
            df.to_csv(fout, encoding="utf-8")
        elif fmt in ("xlsx", "xls"):
            # BUG FIX: was `fmt == ("xlsx" or "xls")`, which only ever
            # matched "xlsx" since ("xlsx" or "xls") evaluates to "xlsx".
            writer = pd.ExcelWriter(fout)
            df.to_excel(writer, "Sheet1")
            writer.save()
        elif fmt.lower() in ("dataframe", "df"):
            # BUG FIX: was `fmt.lower() == ("dataframe" or "df")`, which
            # only matched "dataframe".
            return df
        # NOTE(review): other valid formats ("pdf", "txt") silently produce
        # no output, as in the original implementation.

    def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
                       fname="AmplAnl", silent=False):
        """ Calculate reduced weights based on fragment information.

        The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
        :math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
        c and w being the molecular orbital coefficient and transition weight,
        respectively.
        The MO coefficients from the output first have to be transformed to an
        orthonormal basis.

        Parameters
        ----------
        results : CCParser.ParseContainer
            Container object which contains excited state amplitudes
        nbsfA : int
            Number of basis functions on System A (assumes system A comes first!)
        extern : CCParser.ParseContainer
            Optional second container which contains orthonormalisation matrix and/or MO coefficients
        fmt : string
            Output format. Available are "print", "dataframe", "xlsx"/"xls" or "csv"
        fname : string
            Output file name (basename only).
        silent : bool
            Whether to ignore lengthy printouts.

        Raises
        ------
        ValueError
            If a non-converged state is present or `fmt` is unsupported.
        AttributeError
            If a required parsed quantity is missing from the container(s).
        """
        has_extern = extern is not None  # FIX: identity check, not `!= None`
        if False in getattr(results, VarNames.has_converged).data:
            raise ValueError("Not converged state detected!")
        # Validate that every required parsed quantity is available.
        if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
            raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
        elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
            raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
        elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
            raise AttributeError("Could not find MO coefficients! Were they parsed?")
        elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
            raise AttributeError("Could not find MO coefficients! Were they parsed?")
        elif not hasattr(results, VarNames.amplitudes):
            raise AttributeError("Could not find amplitudes! Were they parsed?")
        elif not hasattr(results, VarNames.n_bas):
            raise AttributeError("Could not find number of basis functions! Was it parsed?")
        # Source container for matrices: `extern` if provided, else `results`.
        src = extern if has_extern else results
        # (1) Orthonormalization matrix, hardcoded last
        X = getattr(src, VarNames.orthonorm_matrix).get_last()
        X_inv = np.linalg.inv(X)
        # (2) MO coefficients, hardcoded last
        C = getattr(src, VarNames.mo_coefficients).get_last()
        C_prime = C * X_inv  # Szabo, Ostlund, page 142
        max_mo = C.shape[0]
        # (3) Amplitudes
        ampl = getattr(results, VarNames.amplitudes)
        n_states = len(ampl)
        # (4) Number of basis functions
        nbsf = getattr(results, VarNames.n_bas).get_last()
        # (5) Output accumulators
        sum_weights = [0 for _ in range(n_states)]
        sum_redweights = [0 for _ in range(n_states)]
        # Sum of squared orthonormalized coefficients per MO, split by fragment.
        sos_A = [0 for _ in range(C_prime.shape[0])]
        sos_B = [0 for _ in range(C_prime.shape[0])]
        for c, vect in enumerate(C_prime):
            for n in range(nbsf):
                if n < nbsfA:
                    sos_A[c] += vect[0, n]**2
                else:
                    sos_B[c] += vect[0, n]**2
        for i, a in enumerate(ampl):  # state
            for t in range(len(a.occ)):  # transition
                if max(a.virt[t]) > max_mo:
                    if not silent:
                        # FIX: was a backslash line continuation inside the
                        # string, which embedded source indentation into the
                        # message; implicit concatenation keeps it clean.
                        print("State {0:>2d}: Omitting transition with weight "
                              "{1:.1%} due to missing MO coefficients.".format(
                                  i + 1, a.weights[t]))
                    continue
                if len(a.occ[t]) == 1:  # single amplitudes
                    rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
                elif len(a.occ[t]) == 2:  # double amplitudes
                    rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
                               sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
                               )*a.weights[t]
                else:
                    raise IndexError("Currently no more than double "
                                     "amplitudes are supported!")
                sum_weights[i] += a.weights[t]
                sum_redweights[i] += rw
        # Assemble result table with per-state sums, difference and ratio.
        d = {"State": [i+1 for i in range(n_states)],
             "sum_weight": sum_weights,
             "sum_red_weight": sum_redweights}
        df = pd.DataFrame(d)
        df = df.assign(diff=df["sum_weight"]-df["sum_red_weight"],
                       ratio=df["sum_red_weight"]/df["sum_weight"])
        if fmt == "print":
            print("State | Sum(W) | Sum(P) | Sum(W) - Sum(P) | ratio P/W |\n", 50*"-")
            for i in range(n_states):
                print(" S{0:>2d} | {1:.3f} | {2:.3f} | {3:15.3f} | {4:.1%}".format(
                    i+1, sum_weights[i], sum_redweights[i], sum_weights[i] -
                    sum_redweights[i], sum_redweights[i]/sum_weights[i]))
        elif fmt == "dataframe":
            return df
        elif fmt == "csv":
            df.to_csv(fname + "." + fmt, encoding="utf-8")
        elif fmt == "xlsx" or fmt == "xls":
            writer = pd.ExcelWriter(fname + "." + fmt)
            df.to_excel(writer, "Sheet1")
            writer.save()
        else:
            raise ValueError("Output format not supported!")

    def MO_Molden(self, results, atom_basis, fname="molecular_orbitals",
                  tmp_5d=True):
        """ Writes molecular orbitals to a molden file.

        Expects molecular geometry in Angstrom.
        More information on the molden format at
        http://www.cmbi.ru.nl/molden/molden_format.html

        Parameters
        ----------
        results : CCParser.ParseContainer
            Container object which holds MO coefficients.
        atom_basis : dict
            Dictionary mapping GTO exponents/coefficients to atoms. Expected
            format of dictionary entry is list of strings.
        fname : string
            Output file name.
        tmp_5d : bool
            Whether to append the "[5D]" flag (spherical d-functions).
        """
        from .QCBase import PeriodicTable
        import re
        C = results.C.get_last()
        xyz = results.xyz.get_last()
        en = results.mo_energies.get_last()
        PeTa = PeriodicTable()
        #TODO: Permutator needed in case of different formats (Molcas, Gaussian)
        with open(fname+".molden", "w") as out:
            out.write("[Molden Format]\n")
            # write XYZ
            out.write("[Atoms] (Angs)\n")
            for i, atom in enumerate(xyz):
                num = PeTa.get_atomic_num(atom[0])
                out.write("{0:>3}{1:7d}{2:5d}".format(atom[0], i+1, num))
                out.write("".join("{0:16.8f}".format(c) for c in atom[1:])+"\n")
            # write basis exponents
            out.write("[GTO]\n")
            for n in range(len(xyz)):
                # atom sequence number, 0
                out.write("{0:d}{1:5d}\n".format(n+1, 0))
                symb = xyz[n][0].upper()
                basis = atom_basis[symb]
                for coeff in basis:
                    # shell label, number of primitives, 1.00
                    if re.search(r"[SDPF]", coeff[0]):
                        out.write("{0:}{1:6d}{2:12.6f}\n".format(
                            coeff[0], int(coeff[1]), float(coeff[2])))
                    # exponent, contraction coefficient
                    else:
                        out.write("{0:18.8e}{1:18.8e}\n".format(
                            float(coeff[0]), float(coeff[1])))
                out.write("\n")
            for imo in range(C.shape[0]):  # assumes counting from MO 1 !!
                out.write("[MO]\nSym=X\n")
                if imo < en.n_occ:  # occupied
                    out.write("Ene={0:12.6f}\n".format(en.occ[imo]))
                    out.write("Spin=alpha\n")
                    out.write("Occup=1\n")
                else:  # virtual
                    # NOTE(review): indexes `en.virt` with the absolute MO
                    # index; if `en.virt` is a 0-based list of virtual-only
                    # energies this should be `en.virt[imo - en.n_occ]` —
                    # confirm against the parser before changing.
                    out.write("Ene={0:12.6f}\n".format(en.virt[imo]))
                    out.write("Spin=alpha\n")
                    out.write("Occup=0\n")
                for i in range(C.shape[1]):
                    out.write("{0:6d}{1: 22.12e}\n".format(i+1, C[imo, i]))
            if tmp_5d:
                out.write("[5D]\n")
        print("MOs written to Molden file.")
| |
# coding: utf-8
# pylint: disable=no-member, invalid-name, protected-access, no-self-use
# pylint: disable=too-many-branches, too-many-arguments, no-self-use
# pylint: disable=too-many-lines, arguments-differ
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
from ... import symbol, ndarray
from ...base import string_types, numeric_types
from ..block import Block, HybridBlock
from ..utils import _indent
from .. import tensor_types
def _cells_state_info(cells, batch_size):
return sum([c.state_info(batch_size) for c in cells], [])
def _cells_begin_state(cells, **kwargs):
return sum([c.begin_state(**kwargs) for c in cells], [])
def _get_begin_state(cell, F, begin_state, inputs, batch_size):
if begin_state is None:
if F is ndarray:
ctx = inputs.context if isinstance(inputs, tensor_types) else inputs[0].context
with ctx:
begin_state = cell.begin_state(func=F.zeros, batch_size=batch_size)
else:
begin_state = cell.begin_state(func=F.zeros, batch_size=batch_size)
return begin_state
def _format_sequence(length, inputs, layout, merge, in_layout=None):
    """Normalize RNN inputs for unrolling.

    Accepts a single Symbol, a single NDArray, or a list of per-step
    tensors/symbols, and converts between merged and per-step form
    according to `merge`. Returns ``(inputs, axis, F, batch_size)`` where
    ``F`` is the detected API module (`symbol` or `ndarray`), ``axis`` is
    the time-axis position in `layout`, and ``batch_size`` is 0 when it
    cannot be determined (Symbol API).
    """
    assert inputs is not None, \
        "unroll(inputs=None) has been deprecated. " \
        "Please create input variables outside unroll."
    # Positions of the time ('T') and batch ('N') dimensions in the
    # requested output layout.
    axis = layout.find('T')
    batch_axis = layout.find('N')
    batch_size = 0
    # The data may arrive in a different layout than requested.
    in_axis = in_layout.find('T') if in_layout is not None else axis
    if isinstance(inputs, symbol.Symbol):
        F = symbol
        if merge is False:
            # Split the merged symbol into `length` per-step symbols.
            assert len(inputs.list_outputs()) == 1, \
                "unroll doesn't allow grouped symbol as input. Please convert " \
                "to list with list(inputs) first or let unroll handle splitting."
            inputs = list(symbol.split(inputs, axis=in_axis, num_outputs=length,
                                       squeeze_axis=1))
    elif isinstance(inputs, ndarray.NDArray):
        F = ndarray
        batch_size = inputs.shape[batch_axis]
        if merge is False:
            # Split the merged array into one array per time step.
            assert length is None or length == inputs.shape[in_axis]
            inputs = ndarray.split(inputs, axis=in_axis, num_outputs=inputs.shape[in_axis],
                                   squeeze_axis=1)
    else:
        # `inputs` is already a list of per-step tensors/symbols.
        assert length is None or len(inputs) == length
        if isinstance(inputs[0], symbol.Symbol):
            F = symbol
        else:
            F = ndarray
            batch_size = inputs[0].shape[batch_axis]
        if merge is True:
            # Stack the per-step inputs back into one tensor along the
            # time axis; the merged tensor is now in the requested layout.
            inputs = [F.expand_dims(i, axis=axis) for i in inputs]
            inputs = F.concat(*inputs, dim=axis)
            in_axis = axis
    if isinstance(inputs, tensor_types) and axis != in_axis:
        # Merged tensor kept its input layout; swap time/batch axes to match.
        inputs = F.swapaxes(inputs, dim1=axis, dim2=in_axis)
    return inputs, axis, F, batch_size
class RecurrentCell(Block):
"""Abstract base class for RNN cells
Parameters
----------
prefix : str, optional
Prefix for names of `Block`s
(this prefix is also used for names of weights if `params` is `None`
i.e. if `params` are being created and not reused)
params : Parameter or None, optional
Container for weight sharing between cells.
A new Parameter container is created if `params` is `None`.
"""
def __init__(self, prefix=None, params=None):
super(RecurrentCell, self).__init__(prefix=prefix, params=params)
self._modified = False
self.reset()
def __repr__(self):
s = '{name}({mapping}'
if hasattr(self, '_activation'):
s += ', {_activation}'
s += ')'
mapping = ('{_input_size} -> {_hidden_size}'.format(**self.__dict__) if self._input_size
else self._hidden_size)
return s.format(name=self.__class__.__name__,
mapping=mapping,
**self.__dict__)
def reset(self):
"""Reset before re-using the cell for another graph."""
self._init_counter = -1
self._counter = -1
def state_info(self, batch_size=0):
"""shape and layout information of states"""
raise NotImplementedError()
@property
def _curr_prefix(self):
return '%st%d_'%(self.prefix, self._counter)
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var etc`. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
batch_size: int, default 0
Only required for NDArray API. Size of the batch ('N' in layout)
dimension of input.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
assert not self._modified, \
"After applying modifier cells (e.g. ZoneoutCell) the base " \
"cell cannot be called directly. Call the modifier cell instead."
states = []
for info in self.state_info(batch_size):
self._init_counter += 1
if info is not None:
info.update(kwargs)
else:
info = kwargs
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**info)
states.append(state)
return states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        """Unrolls an RNN cell across time steps.
        Parameters
        ----------
        length : int
            Number of steps to unroll.
        inputs : Symbol, list of Symbol, or None
            If `inputs` is a single Symbol (usually the output
            of Embedding symbol), it should have shape
            (batch_size, length, ...) if `layout` is 'NTC',
            or (length, batch_size, ...) if `layout` is 'TNC'.
            If `inputs` is a list of symbols (usually output of
            previous unroll), they should all have shape
            (batch_size, ...).
        begin_state : nested list of Symbol, optional
            Input states created by `begin_state()`
            or output state of another cell.
            Created from `begin_state()` if `None`.
        layout : str, optional
            `layout` of input symbol. Only used if inputs
            is a single Symbol.
        merge_outputs : bool, optional
            If `False`, returns outputs as a list of Symbols.
            If `True`, concatenates output across time steps
            and returns a single symbol with shape
            (batch_size, length, ...) if layout is 'NTC',
            or (length, batch_size, ...) if layout is 'TNC'.
            If `None`, output whatever is faster.
        Returns
        -------
        outputs : list of Symbol or Symbol
            Symbol (if `merge_outputs` is True) or list of Symbols
            (if `merge_outputs` is False) corresponding to the output from
            the RNN from this unrolling.
        states : list of Symbol
            The new state of this RNN after this unrolling.
            The type of this symbol is same as the output of `begin_state()`.
        """
        self.reset()
        # Normalize inputs to a per-step list; F is the symbol/ndarray module.
        inputs, _, F, batch_size = _format_sequence(length, inputs, layout, False)
        begin_state = _get_begin_state(self, F, begin_state, inputs, batch_size)
        states = begin_state
        outputs = []
        # Step the cell once per time index, threading the state through.
        for i in range(length):
            output, states = self(inputs[i], states)
            outputs.append(output)
        # Re-pack outputs according to the requested layout / merge behavior.
        outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs)
        return outputs, states
#pylint: disable=no-self-use
def _get_activation(self, F, inputs, activation, **kwargs):
"""Get activation function. Convert if is string"""
if isinstance(activation, string_types):
return F.Activation(inputs, act_type=activation, **kwargs)
else:
return activation(inputs, **kwargs)
    def forward(self, inputs, states):
        """Unrolls the recurrent cell for one time step.
        Parameters
        ----------
        inputs : sym.Variable
            Input symbol, 2D, of shape (batch_size * num_units).
        states : list of sym.Variable
            RNN state from previous step or the output of begin_state().
        Returns
        -------
        output : Symbol
            Symbol corresponding to the output from the RNN when unrolling
            for a single time step.
        states : list of Symbol
            The new state of this RNN after this unrolling.
            The type of this symbol is same as the output of `begin_state()`.
            This can be used as an input state to the next time step
            of this RNN.
        See Also
        --------
        begin_state: This function can provide the states for the first time step.
        unroll: This function unrolls an RNN for a given number of (>=1) time steps.
        """
        # pylint: disable= arguments-differ
        # Advance the step counter so _curr_prefix yields a unique per-step
        # name prefix, then defer to the Block machinery.
        self._counter += 1
        return super(RecurrentCell, self).forward(inputs, states)
class HybridRecurrentCell(RecurrentCell, HybridBlock):
    """HybridRecurrentCell supports hybridize."""
    def __init__(self, prefix=None, params=None):
        super(HybridRecurrentCell, self).__init__(prefix=prefix, params=params)
    def hybrid_forward(self, F, x, *args, **kwargs):
        # Abstract: subclasses implement the single-step computation here.
        raise NotImplementedError
class RNNCell(HybridRecurrentCell):
    """Simple recurrent neural network cell.
    Parameters
    ----------
    hidden_size : int
        Number of units in output symbol
    activation : str or Symbol, default 'tanh'
        Type of activation function.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size : int, default 0
        Size of the input feature dimension. 0 defers the shape until the
        first forward pass (see ``allow_deferred_init`` below).
    prefix : str, default 'rnn_'
        Prefix for name of `Block`s
        (and name of weight if params is `None`).
    params : Parameter or None
        Container for weight sharing between cells.
        Created if `None`.
    """
    def __init__(self, hidden_size, activation='tanh',
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 input_size=0, prefix=None, params=None):
        super(RNNCell, self).__init__(prefix=prefix, params=params)
        self._hidden_size = hidden_size
        self._activation = activation
        self._input_size = input_size
        # allow_deferred_init lets input_size=0 be inferred at first call.
        self.i2h_weight = self.params.get('i2h_weight', shape=(hidden_size, input_size),
                                          init=i2h_weight_initializer,
                                          allow_deferred_init=True)
        self.h2h_weight = self.params.get('h2h_weight', shape=(hidden_size, hidden_size),
                                          init=h2h_weight_initializer,
                                          allow_deferred_init=True)
        self.i2h_bias = self.params.get('i2h_bias', shape=(hidden_size,),
                                        init=i2h_bias_initializer,
                                        allow_deferred_init=True)
        self.h2h_bias = self.params.get('h2h_bias', shape=(hidden_size,),
                                        init=h2h_bias_initializer,
                                        allow_deferred_init=True)
    def state_info(self, batch_size=0):
        # Single hidden state, layout (batch, channel).
        return [{'shape': (batch_size, self._hidden_size), '__layout__': 'NC'}]
    def _alias(self):
        return 'rnn'
    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        """One step: h' = act(W_ih * x + b_ih + W_hh * h + b_hh)."""
        name = self._curr_prefix
        i2h = F.FullyConnected(data=inputs, weight=i2h_weight, bias=i2h_bias,
                               num_hidden=self._hidden_size,
                               name='%si2h'%name)
        h2h = F.FullyConnected(data=states[0], weight=h2h_weight, bias=h2h_bias,
                               num_hidden=self._hidden_size,
                               name='%sh2h'%name)
        output = self._get_activation(F, i2h + h2h, self._activation,
                                      name='%sout'%name)
        # Output doubles as the next hidden state.
        return output, [output]
class LSTMCell(HybridRecurrentCell):
    """Long-Short Term Memory (LSTM) network cell.
    Parameters
    ----------
    hidden_size : int
        Number of units in output symbol.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default 'zeros'
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size : int, default 0
        Size of the input feature dimension. 0 defers the shape until the
        first forward pass (see ``allow_deferred_init`` below).
    prefix : str, default 'lstm_'
        Prefix for name of `Block`s
        (and name of weight if params is `None`).
    params : Parameter or None
        Container for weight sharing between cells.
        Created if `None`.
    """
    def __init__(self, hidden_size,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 input_size=0, prefix=None, params=None):
        super(LSTMCell, self).__init__(prefix=prefix, params=params)
        self._hidden_size = hidden_size
        self._input_size = input_size
        # Weights/biases are 4x hidden_size: one slice per gate
        # (input, forget, cell/candidate, output).
        self.i2h_weight = self.params.get('i2h_weight', shape=(4*hidden_size, input_size),
                                          init=i2h_weight_initializer,
                                          allow_deferred_init=True)
        self.h2h_weight = self.params.get('h2h_weight', shape=(4*hidden_size, hidden_size),
                                          init=h2h_weight_initializer,
                                          allow_deferred_init=True)
        self.i2h_bias = self.params.get('i2h_bias', shape=(4*hidden_size,),
                                        init=i2h_bias_initializer,
                                        allow_deferred_init=True)
        self.h2h_bias = self.params.get('h2h_bias', shape=(4*hidden_size,),
                                        init=h2h_bias_initializer,
                                        allow_deferred_init=True)
    def state_info(self, batch_size=0):
        # Two states: hidden state h and memory cell c.
        return [{'shape': (batch_size, self._hidden_size), '__layout__': 'NC'},
                {'shape': (batch_size, self._hidden_size), '__layout__': 'NC'}]
    def _alias(self):
        return 'lstm'
    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        """One LSTM step; states is [h, c], returns (h', [h', c'])."""
        name = self._curr_prefix
        i2h = F.FullyConnected(data=inputs, weight=i2h_weight, bias=i2h_bias,
                               num_hidden=self._hidden_size*4,
                               name='%si2h'%name)
        h2h = F.FullyConnected(data=states[0], weight=h2h_weight, bias=h2h_bias,
                               num_hidden=self._hidden_size*4,
                               name='%sh2h'%name)
        # Split the fused projection into the four gate pre-activations.
        gates = i2h + h2h
        slice_gates = F.SliceChannel(gates, num_outputs=4,
                                     name="%sslice"%name)
        in_gate = F.Activation(slice_gates[0], act_type="sigmoid",
                               name='%si'%name)
        forget_gate = F.Activation(slice_gates[1], act_type="sigmoid",
                                   name='%sf'%name)
        in_transform = F.Activation(slice_gates[2], act_type="tanh",
                                    name='%sc'%name)
        out_gate = F.Activation(slice_gates[3], act_type="sigmoid",
                                name='%so'%name)
        # c' = f * c + i * candidate;  h' = o * tanh(c')
        next_c = F._internal._plus(forget_gate * states[1], in_gate * in_transform,
                                   name='%sstate'%name)
        next_h = F._internal._mul(out_gate, F.Activation(next_c, act_type="tanh"),
                                  name='%sout'%name)
        return next_h, [next_h, next_c]
class GRUCell(HybridRecurrentCell):
    """Gated Recurrent Unit (GRU) network cell.
    Note: this is an implementation of the cuDNN version of GRUs
    (slight modification compared to Cho et al. 2014).
    Parameters
    ----------
    hidden_size : int
        Number of units in output symbol.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size : int, default 0
        Size of the input feature dimension. 0 defers the shape until the
        first forward pass (see ``allow_deferred_init`` below).
    prefix : str, default 'gru_'
        prefix for name of `Block`s
        (and name of weight if params is `None`).
    params : Parameter or None
        Container for weight sharing between cells.
        Created if `None`.
    """
    def __init__(self, hidden_size,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 input_size=0, prefix=None, params=None):
        super(GRUCell, self).__init__(prefix=prefix, params=params)
        self._hidden_size = hidden_size
        self._input_size = input_size
        # Weights/biases are 3x hidden_size: reset gate, update gate,
        # and candidate state.
        self.i2h_weight = self.params.get('i2h_weight', shape=(3*hidden_size, input_size),
                                          init=i2h_weight_initializer,
                                          allow_deferred_init=True)
        self.h2h_weight = self.params.get('h2h_weight', shape=(3*hidden_size, hidden_size),
                                          init=h2h_weight_initializer,
                                          allow_deferred_init=True)
        self.i2h_bias = self.params.get('i2h_bias', shape=(3*hidden_size,),
                                        init=i2h_bias_initializer,
                                        allow_deferred_init=True)
        self.h2h_bias = self.params.get('h2h_bias', shape=(3*hidden_size,),
                                        init=h2h_bias_initializer,
                                        allow_deferred_init=True)
    def state_info(self, batch_size=0):
        # Single hidden state, layout (batch, channel).
        return [{'shape': (batch_size, self._hidden_size), '__layout__': 'NC'}]
    def _alias(self):
        return 'gru'
    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        """One GRU step; states is [h], returns (h', [h'])."""
        # pylint: disable=too-many-locals
        name = self._curr_prefix
        prev_state_h = states[0]
        i2h = F.FullyConnected(data=inputs,
                               weight=i2h_weight,
                               bias=i2h_bias,
                               num_hidden=self._hidden_size * 3,
                               name="%si2h" % name)
        h2h = F.FullyConnected(data=prev_state_h,
                               weight=h2h_weight,
                               bias=h2h_bias,
                               num_hidden=self._hidden_size * 3,
                               name="%sh2h" % name)
        # Split each fused projection into reset / update / candidate parts.
        i2h_r, i2h_z, i2h = F.SliceChannel(i2h, num_outputs=3, name="%si2h_slice" % name)
        h2h_r, h2h_z, h2h = F.SliceChannel(h2h, num_outputs=3, name="%sh2h_slice" % name)
        reset_gate = F.Activation(i2h_r + h2h_r, act_type="sigmoid",
                                  name="%sr_act" % name)
        update_gate = F.Activation(i2h_z + h2h_z, act_type="sigmoid",
                                   name="%sz_act" % name)
        # cuDNN variant: the reset gate scales h2h *after* the linear
        # transform (vs. Cho et al., who gate the state before it).
        next_h_tmp = F.Activation(i2h + reset_gate * h2h, act_type="tanh",
                                  name="%sh_act" % name)
        # h' = (1 - z) * candidate + z * h
        next_h = F._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_state_h,
                                   name='%sout' % name)
        return next_h, [next_h]
class SequentialRNNCell(RecurrentCell):
    """Sequentially stacking multiple RNN cells."""
    def __init__(self, prefix=None, params=None):
        super(SequentialRNNCell, self).__init__(prefix=prefix, params=params)
    def __repr__(self):
        s = '{name}(\n{modstr}\n)'
        return s.format(name=self.__class__.__name__,
                        modstr='\n'.join(['({i}): {m}'.format(i=i, m=_indent(m.__repr__(), 2))
                                          for i, m in enumerate(self._children)]))
    def add(self, cell):
        """Appends a cell into the stack.
        Parameters
        ----------
        cell : rnn cell
        """
        self.register_child(cell)
    def state_info(self, batch_size=0):
        # Concatenated state infos of all stacked cells, in stacking order.
        return _cells_state_info(self._children, batch_size)
    def begin_state(self, **kwargs):
        assert not self._modified, \
            "After applying modifier cells (e.g. ZoneoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._children, **kwargs)
    def __call__(self, inputs, states):
        """Single step: output of each cell feeds the next one."""
        self._counter += 1
        next_states = []
        # `states` is a flat list; slice out each cell's share by the
        # length of its state_info().
        p = 0
        for cell in self._children:
            assert not isinstance(cell, BidirectionalCell)
            n = len(cell.state_info())
            state = states[p:p+n]
            p += n
            inputs, state = cell(inputs, state)
            next_states.append(state)
        # Flatten the per-cell state lists back into one list.
        return inputs, sum(next_states, [])
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, _, F, batch_size = _format_sequence(length, inputs, layout, None)
        num_cells = len(self._children)
        begin_state = _get_begin_state(self, F, begin_state, inputs, batch_size)
        p = 0
        next_states = []
        for i, cell in enumerate(self._children):
            n = len(cell.state_info())
            states = begin_state[p:p+n]
            p += n
            # Intermediate layers pass merge_outputs=None (let the cell pick
            # the faster format); only the last layer honors the caller's
            # requested merge_outputs.
            inputs, states = cell.unroll(length, inputs=inputs, begin_state=states, layout=layout,
                                         merge_outputs=None if i < num_cells-1 else merge_outputs)
            next_states.extend(states)
        return inputs, next_states
    def hybrid_forward(self, *args, **kwargs):
        # Stepping is implemented in __call__/unroll; no hybrid graph here.
        raise NotImplementedError
class DropoutCell(HybridRecurrentCell):
    """Applies dropout on input.
    Parameters
    ----------
    dropout : float
        Percentage of elements to drop out, which
        is 1 - percentage to retain.
    """
    def __init__(self, dropout, prefix=None, params=None):
        super(DropoutCell, self).__init__(prefix, params)
        assert isinstance(dropout, numeric_types), "dropout probability must be a number"
        self.dropout = dropout
    def __repr__(self):
        s = '{name}(p = {dropout})'
        return s.format(name=self.__class__.__name__,
                        **self.__dict__)
    def state_info(self, batch_size=0):
        # Stateless: dropout carries nothing across time steps.
        return []
    def _alias(self):
        return 'dropout'
    def hybrid_forward(self, F, inputs, states):
        # p == 0 is a no-op; states pass through untouched.
        if self.dropout > 0:
            inputs = F.Dropout(data=inputs, p=self.dropout)
        return inputs, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, _, F, _ = _format_sequence(length, inputs, layout, merge_outputs)
        if isinstance(inputs, tensor_types):
            # Fast path: a single merged tensor can be dropped out in one
            # call instead of once per time step.
            return self.hybrid_forward(F, inputs, begin_state if begin_state else [])
        else:
            return super(DropoutCell, self).unroll(
                length, inputs, begin_state=begin_state, layout=layout,
                merge_outputs=merge_outputs)
class ModifierCell(HybridRecurrentCell):
    """Base class for modifier cells. A modifier
    cell takes a base cell, apply modifications
    on it (e.g. Zoneout), and returns a new cell.
    After applying modifiers the base cell should
    no longer be called directly. The modifier cell
    should be used instead.
    """
    def __init__(self, base_cell):
        super(ModifierCell, self).__init__(prefix=None, params=None)
        # Mark the base cell so its begin_state()/__call__ assert if used
        # directly instead of through this wrapper.
        base_cell._modified = True
        self.base_cell = base_cell
    @property
    def params(self):
        # Modifier cells own no parameters; delegate to the wrapped cell.
        self._own_params = False
        return self.base_cell.params
    def state_info(self, batch_size=0):
        return self.base_cell.state_info(batch_size)
    def begin_state(self, func=symbol.zeros, **kwargs):
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        # Temporarily lift the guard so the base cell's begin_state runs.
        self.base_cell._modified = False
        begin = self.base_cell.begin_state(func=func, **kwargs)
        self.base_cell._modified = True
        return begin
    def hybrid_forward(self, F, inputs, states):
        raise NotImplementedError
    def __repr__(self):
        s = '{name}({base_cell})'
        return s.format(name=self.__class__.__name__,
                        **self.__dict__)
class ZoneoutCell(ModifierCell):
    """Applies Zoneout on base cell."""
    def __init__(self, base_cell, zoneout_outputs=0., zoneout_states=0.):
        assert not isinstance(base_cell, BidirectionalCell), \
            "BidirectionalCell doesn't support zoneout since it doesn't support step. " \
            "Please add ZoneoutCell to the cells underneath instead."
        # NOTE(review): SequentialRNNCell as defined in this file never sets
        # a `_bidirectional` attribute, so this access would raise
        # AttributeError for a SequentialRNNCell base — confirm where the
        # attribute is expected to come from.
        assert not isinstance(base_cell, SequentialRNNCell) or not base_cell._bidirectional, \
            "Bidirectional SequentialRNNCell doesn't support zoneout. " \
            "Please add ZoneoutCell to the cells underneath instead."
        super(ZoneoutCell, self).__init__(base_cell)
        self.zoneout_outputs = zoneout_outputs
        self.zoneout_states = zoneout_states
        # Previous step's output, needed to "zone out" back to it.
        self.prev_output = None
    def __repr__(self):
        s = '{name}(p_out={zoneout_outputs}, p_state={zoneout_states}, {base_cell})'
        return s.format(name=self.__class__.__name__,
                        **self.__dict__)
    def _alias(self):
        return 'zoneout'
    def reset(self):
        super(ZoneoutCell, self).reset()
        self.prev_output = None
    def hybrid_forward(self, F, inputs, states):
        cell, p_outputs, p_states = self.base_cell, self.zoneout_outputs, self.zoneout_states
        next_output, next_states = cell(inputs, states)
        # Binary keep/drop mask built by applying Dropout to a ones tensor.
        mask = (lambda p, like: F.Dropout(F.ones_like(like), p=p))
        prev_output = self.prev_output
        if prev_output is None:
            # First step: no previous output, fall back to zeros.
            prev_output = F.zeros_like(next_output)
        # Where the mask drops, keep the previous value (zoneout) instead
        # of the freshly computed one.
        output = (F.where(mask(p_outputs, next_output), next_output, prev_output)
                  if p_outputs != 0. else next_output)
        states = ([F.where(mask(p_states, new_s), new_s, old_s) for new_s, old_s in
                   zip(next_states, states)] if p_states != 0. else next_states)
        self.prev_output = output
        return output, states
class ResidualCell(ModifierCell):
    """
    Adds residual connection as described in Wu et al, 2016
    (https://arxiv.org/abs/1609.08144).
    Output of the cell is output of the base cell plus input.
    """
    def __init__(self, base_cell):
        super(ResidualCell, self).__init__(base_cell)
    def hybrid_forward(self, F, inputs, states):
        output, states = self.base_cell(inputs, states)
        # Residual connection: y = cell(x) + x.
        output = F.elemwise_add(output, inputs, name="%s_plus_residual" % output.name)
        return output, states
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        # Unroll the base cell directly (guard lifted around the call).
        self.base_cell._modified = False
        outputs, states = self.base_cell.unroll(length, inputs=inputs, begin_state=begin_state,
                                                layout=layout, merge_outputs=merge_outputs)
        self.base_cell._modified = True
        # Match the inputs' format to the outputs' format before adding.
        merge_outputs = isinstance(outputs, tensor_types) if merge_outputs is None else \
            merge_outputs
        inputs, _, F, _ = _format_sequence(length, inputs, layout, merge_outputs)
        if merge_outputs:
            outputs = F.elemwise_add(outputs, inputs)
        else:
            outputs = [F.elemwise_add(i, j) for i, j in zip(outputs, inputs)]
        return outputs, states
class BidirectionalCell(HybridRecurrentCell):
    """Bidirectional RNN cell.
    Parameters
    ----------
    l_cell : RecurrentCell
        Cell for forward unrolling
    r_cell : RecurrentCell
        Cell for backward unrolling
    """
    def __init__(self, l_cell, r_cell, output_prefix='bi_'):
        super(BidirectionalCell, self).__init__(prefix='', params=None)
        # children[0] = forward cell, children[1] = backward cell.
        self.register_child(l_cell)
        self.register_child(r_cell)
        self._output_prefix = output_prefix
    def __call__(self, inputs, states):
        # Stepping would require the whole (future) sequence for the
        # backward pass, so only unroll() is supported.
        raise NotImplementedError("Bidirectional cannot be stepped. Please use unroll")
    def __repr__(self):
        s = '{name}(forward={l_cell}, backward={r_cell})'
        return s.format(name=self.__class__.__name__,
                        l_cell=self._children[0],
                        r_cell=self._children[1])
    def state_info(self, batch_size=0):
        return _cells_state_info(self._children, batch_size)
    def begin_state(self, **kwargs):
        assert not self._modified, \
            "After applying modifier cells (e.g. DropoutCell) the base " \
            "cell cannot be called directly. Call the modifier cell instead."
        return _cells_begin_state(self._children, **kwargs)
    def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
        self.reset()
        inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, False)
        begin_state = _get_begin_state(self, F, begin_state, inputs, batch_size)
        states = begin_state
        l_cell, r_cell = self._children
        # Forward cell reads the sequence as-is; backward cell reads it
        # reversed. The flat state list is split between the two cells.
        l_outputs, l_states = l_cell.unroll(length, inputs=inputs,
                                            begin_state=states[:len(l_cell.state_info(batch_size))],
                                            layout=layout, merge_outputs=merge_outputs)
        r_outputs, r_states = r_cell.unroll(length,
                                            inputs=list(reversed(inputs)),
                                            begin_state=states[len(l_cell.state_info(batch_size)):],
                                            layout=layout, merge_outputs=merge_outputs)
        if merge_outputs is None:
            # Merge only if both children already produced merged tensors.
            merge_outputs = (isinstance(l_outputs, tensor_types)
                             and isinstance(r_outputs, tensor_types))
            l_outputs, _, _, _ = _format_sequence(None, l_outputs, layout, merge_outputs)
            r_outputs, _, _, _ = _format_sequence(None, r_outputs, layout, merge_outputs)
        if merge_outputs:
            # Re-reverse the backward outputs along time, then concat on
            # the feature axis.
            r_outputs = F.reverse(r_outputs, axis=axis)
            outputs = F.concat(l_outputs, r_outputs, dim=2, name='%sout'%self._output_prefix)
        else:
            outputs = [F.concat(l_o, r_o, dim=1, name='%st%d'%(self._output_prefix, i))
                       for i, (l_o, r_o) in enumerate(zip(l_outputs, reversed(r_outputs)))]
        states = l_states + r_states
        return outputs, states
| |
from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import testing
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces, MixedSourceEstimate, read_source_estimate,
morph_data, extract_label_time_course,
spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
spatial_inter_hemi_connectivity)
from mne.source_estimate import (compute_morph_matrix, grade_to_vertices,
grade_to_tris)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_h5py, run_tests_if_main, slow_test)
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Root of the mne-testing dataset; with download=False this may be an empty
# path, in which case @testing.requires_testing_data skips the tests.
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
# Inverse operator, MRI volume, and source-space files used across tests.
fname_inv = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_src_3 = op.join(data_path, 'subjects', 'sample', 'bem',
                      'sample-oct-4-src.fif')
# Stem names: read_source_estimate appends hemisphere suffixes itself.
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_smorph = op.join(data_path, 'MEG', 'sample',
                       'sample_audvis_trunc-meg')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
                       'fsaverage_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-vol-7-fwd.fif')
# Module-wide seeded RNG so random test data are reproducible.
rng = np.random.RandomState(0)
@testing.requires_testing_data
def test_spatial_inter_hemi_connectivity():
    """Test spatial connectivity between hemispheres"""
    # trivial cases: a tiny distance links nothing, a huge one links
    # every LH vertex to every RH vertex (half of the full matrix).
    conn = spatial_inter_hemi_connectivity(fname_src_3, 5e-6)
    assert_equal(conn.data.size, 0)
    conn = spatial_inter_hemi_connectivity(fname_src_3, 5e6)
    assert_equal(conn.data.size, np.prod(conn.shape) // 2)
    # actually interesting case (1cm), should be between 2 and 10% of verts
    src = read_source_spaces(fname_src_3)
    conn = spatial_inter_hemi_connectivity(src, 10e-3)
    conn = conn.tocsr()
    n_src = conn.shape[0]
    assert_true(n_src * 0.02 < conn.data.size < n_src * 0.10)
    # within-hemisphere blocks must be empty (inter-hemi only) ...
    assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
    assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
    # ... and the matrix must be symmetric.
    c = (conn.T + conn) / 2. - conn
    c.eliminate_zeros()
    assert_equal(c.data.size, 0)
    # check locations
    upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
    assert_equal(upper_right.sum(), conn.sum() // 2)
    # 1cm inter-hemi neighbors should all lie in midline regions.
    good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
                   'G_cuneus']
    for hi, hemi in enumerate(('lh', 'rh')):
        has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right,
                                                          axis=1 - hi))[0]]
        labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
                                        subjects_dir=subjects_dir)
        use_labels = [l.name[:-3] for l in labels
                      if np.in1d(l.vertices, has_neighbors).any()]
        assert_true(set(use_labels) - set(good_labels) == set())
@slow_test
@testing.requires_testing_data
def test_volume_stc():
    """Test volume STCs
    """
    tempdir = _TempDir()
    N = 100
    data = np.arange(N)[:, np.newaxis]
    # Exercise both flat and column-vector vertex arrays; reading always
    # yields a flat vertex array.
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = VolSourceEstimate(data, vertno, 0, 1)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        # Round-trip twice to confirm save->read is stable.
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(vertno_read, stc_new.vertices)
            assert_array_almost_equal(stc.data, stc_new.data)
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(isinstance(stc_new, VolSourceEstimate))
        assert_array_equal(stc.vertices, stc_new.vertices)
        assert_array_almost_equal(stc.data, stc_new.data)
    # save the stc as a nifti file and export
    try:
        import nibabel as nib
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            src = read_source_spaces(fname_vsrc)
        vol_fname = op.join(tempdir, 'stc.nii.gz')
        stc.save_as_volume(vol_fname, src,
                           dest='surf', mri_resolution=False)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
        with warnings.catch_warnings(record=True):  # nib<->numpy
            t1_img = nib.load(fname_t1)
        # With mri_resolution=True the exported volume must match the T1
        # geometry (shape and affine).
        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
                           dest='mri', mri_resolution=True)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.affine, t1_img.affine, decimal=5)
        # export without saving
        img = stc.as_volume(src, dest='mri', mri_resolution=True)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.affine, t1_img.affine, decimal=5)
    except ImportError:
        print('Save as nifti test skipped, needs NiBabel')
@testing.requires_testing_data
def test_expand():
    """Test stc expansion
    """
    stc = read_source_estimate(fname_stc, 'sample')
    assert_true('sample' in repr(stc))
    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                       subjects_dir=subjects_dir)
    new_label = labels_lh[0] + labels_lh[1]
    stc_limited = stc.in_label(new_label)
    stc_new = stc_limited.copy()
    stc_new.data.fill(0)
    # Expanding each sub-label into the union's vertex set and summing
    # should reconstruct stc_limited implicitly (exercised via __iadd__).
    for label in labels_lh[:2]:
        stc_new += stc.in_label(label).expand(stc_limited.vertices)
    # expand() requires a list of per-hemisphere vertex arrays.
    assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
    assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
    # make sure we can't add unless vertno agree
    assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
    """Return a small random SourceEstimate (10 lh + 90 rh vertices).

    Parameters
    ----------
    n_time : int
        Number of time points.

    Notes
    -----
    Uses the module-level seeded ``rng`` (not the global ``np.random``
    state) so the generated data are reproducible across test runs.
    """
    verts = [np.arange(10), np.arange(90)]
    return SourceEstimate(rng.rand(100, n_time), verts, 0, 1e-1, 'foo')
def test_io_stc():
    """Test IO for STC files
    """
    tempdir = _TempDir()
    stc = _fake_stc()
    stc.save(op.join(tempdir, "tmp.stc"))
    stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
    # Round-trip must preserve data, timing, and per-hemisphere vertices.
    assert_array_almost_equal(stc.data, stc2.data)
    assert_array_almost_equal(stc.tmin, stc2.tmin)
    assert_equal(len(stc.vertices), len(stc2.vertices))
    for v1, v2 in zip(stc.vertices, stc2.vertices):
        assert_array_almost_equal(v1, v2)
    assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_h5py
def test_io_stc_h5():
    """Test IO for STC files using HDF5
    """
    tempdir = _TempDir()
    stc = _fake_stc()
    assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
    out_name = op.join(tempdir, 'tmp')
    stc.save(out_name, ftype='h5')
    stc.save(out_name, ftype='h5')  # test overwrite
    # Reading works both from the stem and the full '-stc.h5' filename.
    stc3 = read_source_estimate(out_name)
    stc4 = read_source_estimate(out_name + '-stc.h5')
    # Mismatched subject must be rejected.
    assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
    for stc_new in stc3, stc4:
        assert_equal(stc_new.subject, stc.subject)
        assert_array_equal(stc_new.data, stc.data)
        assert_array_equal(stc_new.tmin, stc.tmin)
        assert_array_equal(stc_new.tstep, stc.tstep)
        assert_equal(len(stc_new.vertices), len(stc.vertices))
        for v1, v2 in zip(stc_new.vertices, stc.vertices):
            assert_array_equal(v1, v2)
def test_io_w():
    """Test IO for w files
    """
    tempdir = _TempDir()
    # w files hold a single time point.
    stc = _fake_stc(n_time=1)
    w_fname = op.join(tempdir, 'fake')
    stc.save(w_fname, ftype='w')
    src = read_source_estimate(w_fname)
    # Round-trip a second time through an explicit '-lh.w' filename.
    src.save(op.join(tempdir, 'tmp'), ftype='w')
    src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
    assert_array_almost_equal(src.data, src2.data)
    assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
    assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
    """Test arithmetic for STC files
    """
    stc = _fake_stc()
    data = stc.data.copy()
    out = list()
    # Apply the identical chain of operations to the raw ndarray and to the
    # SourceEstimate; the results must agree element-wise.
    for a in [data, stc]:
        a = a + a * 3 + 3 * a - a ** 2 / 2
        a += a
        a -= a
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            a /= 2 * a  # 0/0 here emits a RuntimeWarning; suppressed
        a *= -a
        a += 2
        a -= 1
        a *= -1
        a /= 2
        b = 2 + a
        b = 2 - a
        b = +a
        assert_array_equal(b.data, a.data)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            a **= 3
        out.append(a)
    # out[0] is the ndarray result, out[1] the SourceEstimate result.
    assert_array_equal(out[0], out[1].data)
    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
    stc_mean = stc.mean()
    assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@slow_test
@testing.requires_testing_data
def test_stc_methods():
    """Test stc methods lh_data, rh_data, bin, center_of_mass, resample"""
    stc = read_source_estimate(fname_stc)
    # lh_data / rh_data
    assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
    assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
    # bin (local renamed from ``bin`` to avoid shadowing the builtin):
    # the first bin must equal the mean of the samples before the edge.
    stc_bin = stc.bin(.12)
    a = np.array((1,), dtype=stc.data.dtype)
    a[0] = np.mean(stc.data[0, stc.times < .12])
    assert a[0] == stc_bin.data[0, 0]
    # center_of_mass input validation
    assert_raises(ValueError, stc.center_of_mass, 'sample')
    assert_raises(TypeError, stc.center_of_mass, 'sample',
                  subjects_dir=subjects_dir, surf=1)
    # Zero out the left hemisphere so the COM must land on the right.
    stc.lh_data[:] = 0
    vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
    assert_true(hemi == 1)
    # XXX Should design a fool-proof test case, but here were the results:
    assert_equal(vertex, 124791)
    assert_equal(np.round(t, 2), 0.12)
    # in_label with lh/rh/bi-hemispheric labels
    stc = read_source_estimate(fname_stc)
    stc.subject = 'sample'
    label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                      subjects_dir=subjects_dir)[0]
    label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
                                      subjects_dir=subjects_dir)[0]
    label_both = label_lh + label_rh
    for label in (label_lh, label_rh, label_both):
        assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
        stc_label = stc.in_label(label)
        if label.hemi != 'both':
            if label.hemi == 'lh':
                verts = stc_label.vertices[0]
            else:  # label.hemi == 'rh':
                verts = stc_label.vertices[1]
            n_vertices_used = len(label.get_vertices_used(verts))
            assert_equal(len(stc_label.data), n_vertices_used)
    stc_lh = stc.in_label(label_lh)
    # restricting to the wrong hemisphere / subject must fail
    assert_raises(ValueError, stc_lh.in_label, label_rh)
    label_lh.subject = 'foo'
    assert_raises(RuntimeError, stc.in_label, label_lh)
    # resample: up by 2x then back down must approximately round-trip
    stc_new = deepcopy(stc)
    o_sfreq = 1.0 / stc.tstep
    # note that using no padding for this STC reduces edge ringing...
    stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
    assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
    assert_true(stc_new.tstep == stc.tstep / 2)
    stc_new.resample(o_sfreq, npad=0)
    assert_true(stc_new.data.shape[1] == stc.data.shape[1])
    assert_true(stc_new.tstep == stc.tstep)
    assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_extract_label_time_course():
    """Test extraction of label time courses from stc
    """
    n_stcs = 3
    n_times = 50
    src = read_inverse_operator(fname_inv)['src']
    vertices = [src[0]['vertno'], src[1]['vertno']]
    n_verts = len(vertices[0]) + len(vertices[1])
    # get some labels
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    labels_rh = read_labels_from_annot('sample', hemi='rh',
                                       subjects_dir=subjects_dir)
    labels = list()
    labels.extend(labels_lh[:5])
    labels.extend(labels_rh[:4])
    n_labels = len(labels)
    # label j carries the constant value j at every time point, so the
    # expected 'mean' and 'max' time courses are the same array
    label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
    label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
    # compute the mean with sign flip
    label_means_flipped = np.zeros_like(label_means)
    for i, label in enumerate(labels):
        label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
    # generate some stc's with known data
    stcs = list()
    for i in range(n_stcs):
        data = np.zeros((n_verts, n_times))
        # set the value of the stc within each label
        for j, label in enumerate(labels):
            if label.hemi == 'lh':
                idx = np.intersect1d(vertices[0], label.vertices)
                idx = np.searchsorted(vertices[0], idx)
            elif label.hemi == 'rh':
                idx = np.intersect1d(vertices[1], label.vertices)
                # rh rows are stored after all lh rows in the data array
                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
            data[idx] = label_means[j]
        this_stc = SourceEstimate(data, vertices, 0, 1)
        stcs.append(this_stc)
    # test some invalid inputs
    assert_raises(ValueError, extract_label_time_course, stcs, labels,
                  src, mode='notamode')
    # have an empty label
    empty_label = labels[0].copy()
    empty_label.vertices += 1000000
    assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
                  src, mode='mean')
    # but this works:
    with warnings.catch_warnings(record=True):  # empty label
        tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
                                       allow_empty=True)
    # an allowed empty label must yield an all-zero single-row time course
    for arr in tc:
        assert_true(arr.shape == (1, n_times))
        assert_array_equal(arr, np.zeros((1, n_times)))
    # test the different modes
    modes = ['mean', 'mean_flip', 'pca_flip', 'max']
    for mode in modes:
        # the module-level function and the SourceEstimate method must agree
        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
        label_tc_method = [stc.extract_label_time_course(labels, src,
                           mode=mode) for stc in stcs]
        assert_true(len(label_tc) == n_stcs)
        assert_true(len(label_tc_method) == n_stcs)
        for tc1, tc2 in zip(label_tc, label_tc_method):
            assert_true(tc1.shape == (n_labels, n_times))
            assert_true(tc2.shape == (n_labels, n_times))
            assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
            if mode == 'mean':
                assert_array_almost_equal(tc1, label_means)
            if mode == 'mean_flip':
                assert_array_almost_equal(tc1, label_means_flipped)
            if mode == 'max':
                assert_array_almost_equal(tc1, label_maxs)
    # test label with very few vertices (check SVD conditionals)
    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(len(x) == 2)
    label = Label(vertices=[], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(x.size == 0)
@slow_test
@testing.requires_testing_data
def test_morph_data():
    """Test morphing of data between subjects.

    Checks dense morphing (by grade, by explicit vertices, with varying
    buffer sizes, with precomputed morph matrices) and sparse morphing,
    against stored reference estimates.
    """
    tempdir = _TempDir()
    subject_from = 'sample'
    subject_to = 'fsaverage'
    stc_from = read_source_estimate(fname_smorph, subject='sample')
    stc_to = read_source_estimate(fname_fmorph)
    # make sure we can specify grade
    stc_from.crop(0.09, 0.1)  # for faster computation
    stc_to.crop(0.09, 0.1)  # for faster computation
    assert_array_equal(stc_to.time_as_index([0.09, 0.1], use_rounding=True),
                       [0, len(stc_to.times) - 1])
    # negative smoothing is invalid
    assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
                  subjects_dir=subjects_dir)
    stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
                             subjects_dir=subjects_dir)
    stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
    # Morphing to a density that is too high should raise an informative error
    # (here we need to push to grade=6, but for some subjects even grade=5
    # will break)
    assert_raises(ValueError, stc_to1.morph, subject_from, grade=6,
                  subjects_dir=subjects_dir)
    # make sure we can specify vertices
    vertices_to = grade_to_vertices(subject_to, grade=3,
                                    subjects_dir=subjects_dir)
    stc_to2 = morph_data(subject_from, subject_to, stc_from,
                         grade=vertices_to, smooth=12, buffer_size=1000,
                         subjects_dir=subjects_dir)
    # make sure we can use different buffer_size
    stc_to3 = morph_data(subject_from, subject_to, stc_from,
                         grade=vertices_to, smooth=12, buffer_size=3,
                         subjects_dir=subjects_dir)
    # make sure we get a warning about # of steps
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        morph_data(subject_from, subject_to, stc_from,
                   grade=vertices_to, smooth=1, buffer_size=3,
                   subjects_dir=subjects_dir)
    assert_equal(len(w), 2)

    # all dense morphs must match the stored reference and each other
    # (buffer_size must not affect the result)
    assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
    assert_array_almost_equal(stc_to1.data, stc_to2.data)
    assert_array_almost_equal(stc_to1.data, stc_to3.data)
    # make sure precomputed morph matrices work
    morph_mat = compute_morph_matrix(subject_from, subject_to,
                                     stc_from.vertices, vertices_to,
                                     smooth=12, subjects_dir=subjects_dir)
    stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
    assert_array_almost_equal(stc_to1.data, stc_to3.data)
    # invalid matrix / vertex combinations must be rejected
    assert_raises(ValueError, stc_from.morph_precomputed,
                  subject_to, vertices_to, 'foo')
    assert_raises(ValueError, stc_from.morph_precomputed,
                  subject_to, [vertices_to[0]], morph_mat)
    assert_raises(ValueError, stc_from.morph_precomputed,
                  subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
    assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
                  vertices_to, morph_mat, subject_from='foo')

    # steps warning
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        compute_morph_matrix(subject_from, subject_to,
                             stc_from.vertices, vertices_to,
                             smooth=1, subjects_dir=subjects_dir)
    assert_equal(len(w), 2)

    # morphing should not systematically change the mean time course
    mean_from = stc_from.data.mean(axis=0)
    mean_to = stc_to1.data.mean(axis=0)
    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)

    # make sure we can fill by morphing
    stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
                         smooth=12, buffer_size=3, subjects_dir=subjects_dir)
    assert_true(stc_to5.data.shape[0] == 163842 + 163842)

    # Morph sparse data
    # Make a sparse stc
    stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
    stc_from.vertices[1] = stc_from.vertices[1][[200]]
    stc_from._data = stc_from._data[:3]
    # sparse morphing requires grade=None
    assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
                  grade=5, subjects_dir=subjects_dir)
    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
                                   subjects_dir=subjects_dir)
    # sparse morphing relocates values but must not change them
    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
                              np.sort(stc_to_sparse.data.sum(axis=1)))
    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
    assert_equal(stc_to_sparse.subject, subject_to)
    # fix: these previously compared stc_from attributes to themselves
    # (tautologies); the intent is that morphing preserves the time axis
    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
    assert_equal(stc_to_sparse.tstep, stc_from.tstep)

    # repeat with one hemisphere left empty
    stc_from.vertices[0] = np.array([], dtype=np.int64)
    stc_from._data = stc_from._data[:1]
    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
                                   subjects_dir=subjects_dir)
    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
                              np.sort(stc_to_sparse.data.sum(axis=1)))
    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
    assert_equal(stc_to_sparse.subject, subject_to)
    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
    assert_equal(stc_to_sparse.tstep, stc_from.tstep)
def _my_trans(data):
    """Apply an FFT along time and duplicate the result on a new last axis."""
    transformed = fft(data)
    stacked = np.concatenate([transformed[:, :, None]] * 2, axis=2)
    return stacked, None
def test_transform_data():
    """Test applying a linear (time) transform to stc data."""
    # Synthesize a kernel and sensor data whose product is the dense data,
    # so the (kernel, sens_data) lazy form can be checked against the
    # materialized array
    n_sensors, n_vertices, n_times = 10, 20, 4
    kernel = rng.randn(n_vertices, n_sensors)
    sens_data = rng.randn(n_sensors, n_times)
    vertices = np.arange(n_vertices)
    data = np.dot(kernel, sens_data)

    cases = zip([None, np.arange(n_vertices // 2, n_vertices)],
                [None, 1], [None, 3])
    for idx, tmin_idx, tmax_idx in cases:
        # None means "all vertices"
        idx_use = slice(None, None) if idx is None else idx
        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
        # both storage forms must yield the same transformed result
        for stc_data in (data, (kernel, sens_data)):
            stc = VolSourceEstimate(stc_data, vertices=vertices,
                                    tmin=0., tstep=1.)
            stc_data_t = stc.transform_data(_my_trans, idx=idx,
                                            tmin_idx=tmin_idx,
                                            tmax_idx=tmax_idx)
            assert_allclose(data_f, stc_data_t)
def test_transform():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = rng.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True: a 3-D result is split into a list
    # of 2-D SourceEstimates
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertices, stcs_t[0].vertices)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False: cannot store a 3-D result in place
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    # restrict in place to the rh vertices and the [-50, 500] ms window
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
                                tmax_idx=t_idx[-1])
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
    # all lh vertices were dropped; rh vertices are untouched
    assert_true(len(stc.vertices[0]) == 0)
    assert_equal(stc.vertices[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    # crop the (already modified) stc again, to [0, 250] ms, in place
    times = np.round(1000 * stc.times)
    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
    assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
    """Test spatio-temporal connectivity from triangles."""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    connectivity = spatio_temporal_tris_connectivity(tris, 2)
    x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
    components = stats.cluster_level._get_components(np.array(x), connectivity)
    # _get_components works differently now...
    old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
    new_fmt = np.array(old_fmt)
    # convert the old component labeling into per-component index arrays
    new_fmt = [np.nonzero(new_fmt == v)[0]
               for v in np.unique(new_fmt[new_fmt >= 0])]
    # fix: assert_true(len(new_fmt), len(components)) treated len(components)
    # as the assertion *message* and only tested truthiness of len(new_fmt);
    # an equality check is what was intended
    assert_equal(len(new_fmt), len(components))
    for c, n in zip(components, new_fmt):
        assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_connectivity():
    """Test spatio-temporal connectivity from source spaces."""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    src = [dict(), dict()]
    connectivity = spatio_temporal_tris_connectivity(tris, 2)
    # a minimal fake two-hemisphere source space using the same triangles
    src[0]['use_tris'] = np.array([[0, 1, 2]])
    src[1]['use_tris'] = np.array([[0, 1, 2]])
    src[0]['vertno'] = np.array([0, 1, 2])
    src[1]['vertno'] = np.array([0, 1, 2])
    connectivity2 = spatio_temporal_src_connectivity(src, 2)
    assert_array_equal(connectivity.todense(), connectivity2.todense())
    # add test for dist connectivity
    src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
    src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
    src[0]['vertno'] = [0, 1, 2]
    src[1]['vertno'] = [0, 1, 2]
    connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
    assert_array_equal(connectivity.todense(), connectivity3.todense())
    # add test for source space connectivity with omitted vertices
    inverse_operator = read_inverse_operator(fname_inv)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src_ = inverse_operator['src']
        connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
    assert_equal(len(w), 1)
    # fix: use floor division -- under Python 3 `/` yields a float here,
    # and an integer vertex count is intended (shape is n_vertices * n_times)
    a = connectivity.shape[0] // 2
    b = sum([s['nuse'] for s in inverse_operator['src']])
    assert_true(a == b)

    assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
    """Test stc Pandas exporter."""
    n_vert, n_times = 10, 5
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int dtype is equivalent here
    vertices = [np.arange(n_vert, dtype=int), np.empty(0, dtype=int)]
    data = rng.randn(n_vert, n_times)
    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                              subject='sample')
    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
                                subject='sample')

    for stc in [stc_surf, stc_vol]:
        assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
        for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
            df = stc.to_data_frame(index=ind)
            # fix: the original `a == ind if isinstance(ind, list) else [ind]`
            # parsed as `(a == ind) if ... else [ind]`, so for a string index
            # it asserted a bare non-empty list -- always true. The expected
            # value must be grouped by parentheses.
            assert_true(df.index.names ==
                        (ind if isinstance(ind, list) else [ind]))
            assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data were present as categorial variables
            assert_true(all([c in ['time', 'subject'] for c in
                             df.reset_index().columns][:2]))
def test_get_peak():
    """Test peak getter for surface and volume source estimates."""
    n_vert, n_times = 10, 5
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int dtype is equivalent here
    vertices = [np.arange(n_vert, dtype=int), np.empty(0, dtype=int)]
    data = rng.randn(n_vert, n_times)
    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                              subject='sample')
    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
                                subject='sample')

    for ii, stc in enumerate([stc_surf, stc_vol]):
        # out-of-range / inverted time windows must be rejected
        assert_raises(ValueError, stc.get_peak, tmin=-100)
        assert_raises(ValueError, stc.get_peak, tmax=90)
        assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)

        # default: peak as (vertex id, time value)
        vert_idx, time_idx = stc.get_peak()
        vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
        assert_true(vert_idx in vertno)
        assert_true(time_idx in stc.times)

        # index form: peak as (row index, sample index)
        ch_idx, time_idx = stc.get_peak(vert_as_index=True,
                                        time_as_index=True)
        # fix: check the index returned by *this* call (ch_idx); the previous
        # code re-checked vert_idx, the vertex id from the earlier call
        assert_true(ch_idx < stc.data.shape[0])
        assert_true(time_idx < len(stc.times))
@testing.requires_testing_data
def test_mixed_stc():
    """Test source estimate from mixed source space."""
    n_sources = 90   # total number of sources
    n_samples = 2    # number of time points
    n_spaces = 3     # number of source spaces

    data = rng.randn(n_sources, n_samples)
    vertno = [np.arange(n_sources // n_spaces)] * n_spaces

    # make sure error is raised if vertices are not a list of length >= 2
    assert_raises(ValueError, MixedSourceEstimate, data=data,
                  vertices=[np.arange(n_sources)])

    stc = MixedSourceEstimate(data, vertno, 0, 1)

    vol = read_source_spaces(fname_vsrc)

    # make sure error is raised for plotting surface with volume source
    assert_raises(ValueError, stc.plot_surface, src=vol)
# Run this module's tests when executed as a script
run_tests_if_main()
| |
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db.models import ManyToManyField
from django.forms.models import model_to_dict
from django.test import Client, TestCase as _TestCase, override_settings
from django.urls import reverse, NoReverseMatch
from django.utils.text import slugify
from netaddr import IPNetwork
from taggit.managers import TaggableManager
from extras.models import Tag
from users.models import ObjectPermission
from utilities.permissions import resolve_permission_ct
from .utils import disable_warnings, extract_form_failures, post_data
# Public API of this module: the reusable base classes for view testing
__all__ = (
    'TestCase',
    'ModelTestCase',
    'ModelViewTestCase',
    'ViewTestCases',
)
class TestCase(_TestCase):
    """
    Base test case: creates a logged-in test user and provides helpers for
    assigning object permissions and comparing instances against test data.
    """
    # Permission names ('<app>.<action>_<model>') granted to the test user in setUp()
    user_permissions = ()

    def setUp(self):
        # Create the test user and assign permissions
        self.user = User.objects.create_user(username='testuser')
        self.add_permissions(*self.user_permissions)

        # Initialize the test client
        self.client = Client()
        self.client.force_login(self.user)

    def prepare_instance(self, instance):
        """
        Test cases can override this method to perform any necessary manipulation of an instance prior to its evaluation
        against test data. For example, it can be used to decrypt a Secret's plaintext attribute.
        """
        return instance

    def model_to_dict(self, instance, fields, api=False):
        """
        Return a dictionary representation of an instance.

        :param instance: Model instance to serialize
        :param fields: Iterable of field/attribute names to include
        :param api: If True, normalize values to match their REST API representation
        """
        # Prepare the instance and call Django's model_to_dict() to extract all fields
        model_dict = model_to_dict(self.prepare_instance(instance), fields=fields)

        # Map any additional (non-field) instance attributes that were specified
        for attr in fields:
            if hasattr(instance, attr) and attr not in model_dict:
                model_dict[attr] = getattr(instance, attr)

        for key, value in list(model_dict.items()):
            try:
                field = instance._meta.get_field(key)
            except FieldDoesNotExist:
                # Attribute is not a model field
                continue

            # Handle ManyToManyFields
            if value and type(field) in (ManyToManyField, TaggableManager):
                if field.related_model is ContentType:
                    model_dict[key] = sorted([f'{ct.app_label}.{ct.model}' for ct in value])
                else:
                    # Sort related PKs so comparison is order-independent
                    model_dict[key] = sorted([obj.pk for obj in value])

            if api:
                # Replace ContentType numeric IDs with <app_label>.<model>
                if type(getattr(instance, key)) is ContentType:
                    ct = ContentType.objects.get(pk=value)
                    model_dict[key] = f'{ct.app_label}.{ct.model}'

                # Convert IPNetwork instances to strings
                elif type(value) is IPNetwork:
                    model_dict[key] = str(value)

            else:
                # Convert ArrayFields to CSV strings
                if type(instance._meta.get_field(key)) is ArrayField:
                    model_dict[key] = ','.join([str(v) for v in value])

        return model_dict

    #
    # Permissions management
    #

    def add_permissions(self, *names):
        """
        Assign a set of permissions to the test user. Accepts permission names in the form <app>.<action>_<model>.
        """
        for name in names:
            ct, action = resolve_permission_ct(name)
            obj_perm = ObjectPermission(name=name, actions=[action])
            obj_perm.save()
            obj_perm.users.add(self.user)
            obj_perm.object_types.add(ct)

    #
    # Custom assertions
    #

    def assertHttpStatus(self, response, expected_status):
        """
        TestCase method. Provide more detail in the event of an unexpected HTTP response.
        """
        err_message = None
        # Construct an error message only if we know the test is going to fail
        if response.status_code != expected_status:
            if hasattr(response, 'data'):
                # REST API response; pass the response data through directly
                err = response.data
            else:
                # Attempt to extract form validation errors from the response HTML
                form_errors = extract_form_failures(response.content)
                err = form_errors or response.content or 'No data'
            err_message = f"Expected HTTP status {expected_status}; received {response.status_code}: {err}"
        self.assertEqual(response.status_code, expected_status, err_message)

    def assertInstanceEqual(self, instance, data, exclude=None, api=False):
        """
        Compare a model instance to a dictionary, checking that its attribute values match those specified
        in the dictionary.

        :param instance: Python object instance
        :param data: Dictionary of test data used to define the instance
        :param exclude: List of fields to exclude from comparison (e.g. passwords, which get hashed)
        :param api: Set to True is the data is a JSON representation of the instance
        """
        if exclude is None:
            exclude = []

        fields = [k for k in data.keys() if k not in exclude]
        model_dict = self.model_to_dict(instance, fields=fields, api=api)

        # Omit any dictionary keys which are not instance attributes or have been excluded
        relevant_data = {
            k: v for k, v in data.items() if hasattr(instance, k) and k not in exclude
        }

        self.assertDictEqual(model_dict, relevant_data)

    #
    # Convenience methods
    #

    @classmethod
    def create_tags(cls, *names):
        """
        Create and return a Tag instance for each name given.
        """
        tags = [Tag(name=name, slug=slugify(name)) for name in names]
        Tag.objects.bulk_create(tags)
        return tags
class ModelTestCase(TestCase):
    """
    Parent class for TestCases which deal with models.

    :model: The model class under test (must be set by concrete subclasses)
    """
    model = None

    def _get_queryset(self):
        """
        Return a base queryset suitable for use in test methods.

        Raises NotImplementedError with a clear message if the subclass has
        not defined `model`, instead of an opaque AttributeError on None.
        """
        if self.model is None:
            raise NotImplementedError("Test case must define a model")
        return self.model.objects.all()
#
# UI Tests
#
class ModelViewTestCase(ModelTestCase):
    """
    Base TestCase for model views. Subclass to test individual views.
    """
    def _get_base_url(self):
        """
        Return the base format for a URL for the test's model. Override this to test for a model which belongs
        to a different app (e.g. testing Interfaces within the virtualization app).
        """
        meta = self.model._meta
        return f'{meta.app_label}:{meta.model_name}_{{}}'

    def _get_url(self, action, instance=None):
        """
        Return the URL name for a specific action and optionally a specific instance
        """
        url_name = self._get_base_url().format(action)

        # Without an instance there is no unique identifier to pass
        if instance is None:
            return reverse(url_name)

        # Prefer the slug as the unique identifier when the model defines one
        if hasattr(self.model, 'slug'):
            try:
                return reverse(url_name, kwargs={'slug': instance.slug})
            except NoReverseMatch:
                pass

        # Fall back to the numeric primary key
        return reverse(url_name, kwargs={'pk': instance.pk})
class ViewTestCases:
"""
We keep any TestCases with test_* methods inside a class to prevent unittest from trying to run them.
"""
class GetObjectViewTestCase(ModelViewTestCase):
    """
    Retrieve a single instance.
    """
    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_get_object_anonymous(self):
        """With all models exempt from view permissions, anonymous GET succeeds."""
        # Make the request as an unauthenticated user
        self.client.logout()
        response = self.client.get(self._get_queryset().first().get_absolute_url())
        self.assertHttpStatus(response, 200)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get_object_without_permission(self):
        """GET without any view permission is denied (403)."""
        instance = self._get_queryset().first()

        # Try GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(instance.get_absolute_url()), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get_object_with_permission(self):
        """GET with an unconstrained model-level view permission succeeds (200)."""
        instance = self._get_queryset().first()

        # Add model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['view']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(instance.get_absolute_url()), 200)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_get_object_with_constrained_permission(self):
        """A pk-constrained permission permits one instance (200) and hides the other (404)."""
        instance1, instance2 = self._get_queryset().all()[:2]

        # Add object-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': instance1.pk},
            actions=['view']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET to permitted object
        self.assertHttpStatus(self.client.get(instance1.get_absolute_url()), 200)

        # Try GET to non-permitted object
        self.assertHttpStatus(self.client.get(instance2.get_absolute_url()), 404)
class GetObjectChangelogViewTestCase(ModelViewTestCase):
    """
    View the changelog for an instance.
    """
    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_get_object_changelog(self):
        """The changelog view for the first instance renders successfully."""
        instance = self._get_queryset().first()
        response = self.client.get(self._get_url('changelog', instance))
        self.assertHttpStatus(response, 200)
class CreateObjectViewTestCase(ModelViewTestCase):
    """
    Create a single new instance.

    :form_data: Data to be used when creating a new object.
    """
    form_data = {}

    def test_create_object_without_permission(self):
        """Both GET of the form and POST are denied (403) without 'add' permission."""
        # Try GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('add')), 403)

        # Try POST without permission
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.form_data),
        }
        # Fix: issue the POST *inside* disable_warnings() -- the 403's log
        # warning is emitted at request time, not when the status is
        # asserted. This also matches the sibling test cases in this module.
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(**request), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_create_object_with_permission(self):
        """An unconstrained 'add' permission allows the form (GET) and creation (POST)."""
        initial_count = self._get_queryset().count()

        # Assign unconstrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['add']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(self._get_url('add')), 200)

        # Try POST with model-level permission
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        self.assertEqual(initial_count + 1, self._get_queryset().count())
        self.assertInstanceEqual(self._get_queryset().order_by('pk').last(), self.form_data)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_create_object_with_constrained_permission(self):
        """A constrained 'add' permission blocks creation until the constraint matches."""
        initial_count = self._get_queryset().count()

        # Assign constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': 0},  # Dummy permission to deny all
            actions=['add']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with object-level permission
        self.assertHttpStatus(self.client.get(self._get_url('add')), 200)

        # Try to create an object (not permitted); the form re-renders (200)
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 200)
        self.assertEqual(initial_count, self._get_queryset().count())  # Check that no object was created

        # Update the ObjectPermission to allow creation
        obj_perm.constraints = {'pk__gt': 0}
        obj_perm.save()

        # Try to create an object (permitted)
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        self.assertEqual(initial_count + 1, self._get_queryset().count())
        self.assertInstanceEqual(self._get_queryset().order_by('pk').last(), self.form_data)
class EditObjectViewTestCase(ModelViewTestCase):
    """
    Edit a single existing instance.

    :form_data: Data to be used when updating the first existing object.
    """
    form_data = {}

    def test_edit_object_without_permission(self):
        """Both GET of the edit form and POST are denied (403) without permission."""
        instance = self._get_queryset().first()

        # Try GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('edit', instance)), 403)

        # Try POST without permission
        request = {
            'path': self._get_url('edit', instance),
            'data': post_data(self.form_data),
        }
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(**request), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_edit_object_with_permission(self):
        """An unconstrained 'change' permission allows editing via GET and POST."""
        instance = self._get_queryset().first()

        # Assign model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(self._get_url('edit', instance)), 200)

        # Try POST with model-level permission
        request = {
            'path': self._get_url('edit', instance),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        self.assertInstanceEqual(self._get_queryset().get(pk=instance.pk), self.form_data)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_edit_object_with_constrained_permission(self):
        """A pk-constrained 'change' permission allows editing only the permitted instance."""
        instance1, instance2 = self._get_queryset().all()[:2]

        # Assign constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': instance1.pk},
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with a permitted object
        self.assertHttpStatus(self.client.get(self._get_url('edit', instance1)), 200)

        # Try GET with a non-permitted object
        self.assertHttpStatus(self.client.get(self._get_url('edit', instance2)), 404)

        # Try to edit a permitted object
        request = {
            'path': self._get_url('edit', instance1),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        self.assertInstanceEqual(self._get_queryset().get(pk=instance1.pk), self.form_data)

        # Try to edit a non-permitted object
        request = {
            'path': self._get_url('edit', instance2),
            'data': post_data(self.form_data),
        }
        self.assertHttpStatus(self.client.post(**request), 404)
class DeleteObjectViewTestCase(ModelViewTestCase):
    """
    Delete a single instance.
    """
    def test_delete_object_without_permission(self):
        """Both GET of the confirmation form and POST are denied (403) without permission."""
        instance = self._get_queryset().first()

        # Try GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('delete', instance)), 403)

        # Try POST without permission
        request = {
            'path': self._get_url('delete', instance),
            'data': post_data({'confirm': True}),
        }
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(**request), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_delete_object_with_permission(self):
        """An unconstrained 'delete' permission allows deletion; the object must be gone."""
        instance = self._get_queryset().first()

        # Assign model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['delete']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(self._get_url('delete', instance)), 200)

        # Try POST with model-level permission
        request = {
            'path': self._get_url('delete', instance),
            'data': post_data({'confirm': True}),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        with self.assertRaises(ObjectDoesNotExist):
            self._get_queryset().get(pk=instance.pk)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_delete_object_with_constrained_permission(self):
        """A pk-constrained 'delete' permission deletes only the permitted instance."""
        instance1, instance2 = self._get_queryset().all()[:2]

        # Assign object-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': instance1.pk},
            actions=['delete']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with a permitted object
        self.assertHttpStatus(self.client.get(self._get_url('delete', instance1)), 200)

        # Try GET with a non-permitted object
        self.assertHttpStatus(self.client.get(self._get_url('delete', instance2)), 404)

        # Try to delete a permitted object
        request = {
            'path': self._get_url('delete', instance1),
            'data': post_data({'confirm': True}),
        }
        self.assertHttpStatus(self.client.post(**request), 302)
        with self.assertRaises(ObjectDoesNotExist):
            self._get_queryset().get(pk=instance1.pk)

        # Try to delete a non-permitted object; it must still exist afterwards
        request = {
            'path': self._get_url('delete', instance2),
            'data': post_data({'confirm': True}),
        }
        self.assertHttpStatus(self.client.post(**request), 404)
        self.assertTrue(self._get_queryset().filter(pk=instance2.pk).exists())
class ListObjectsViewTestCase(ModelViewTestCase):
    """
    Retrieve multiple instances.
    """
    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_list_objects_anonymous(self):
        """With all models exempt from view permissions, anonymous GET of the list succeeds."""
        # Make the request as an unauthenticated user
        self.client.logout()
        response = self.client.get(self._get_url('list'))
        self.assertHttpStatus(response, 200)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_list_objects_without_permission(self):
        """GET of the list view without any view permission is denied (403)."""
        # Try GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('list')), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_list_objects_with_permission(self):
        """A model-level view permission allows the list view and the CSV export."""
        # Add model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['view']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(self._get_url('list')), 200)

        # Built-in CSV export (only for models that define csv_headers)
        if hasattr(self.model, 'csv_headers'):
            response = self.client.get('{}?export'.format(self._get_url('list')))
            self.assertHttpStatus(response, 200)
            self.assertEqual(response.get('Content-Type'), 'text/csv')

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_list_objects_with_constrained_permission(self):
        """A pk-constrained view permission lists only the permitted instance."""
        instance1, instance2 = self._get_queryset().all()[:2]

        # Add object-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': instance1.pk},
            actions=['view']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with object-level permission
        response = self.client.get(self._get_url('list'))
        self.assertHttpStatus(response, 200)
        content = str(response.content)
        # Check the rendered page by name when available, otherwise by URL
        if hasattr(self.model, 'name'):
            self.assertIn(instance1.name, content)
            self.assertNotIn(instance2.name, content)
        else:
            self.assertIn(instance1.get_absolute_url(), content)
            self.assertNotIn(instance2.get_absolute_url(), content)
class CreateMultipleObjectsViewTestCase(ModelViewTestCase):
    """
    Create multiple instances using a single form. Expects the creation of three new instances by default.

    :bulk_create_count: The number of objects expected to be created (default: 3).
    :bulk_create_data: A dictionary of data to be used for bulk object creation.
    """
    bulk_create_count = 3
    bulk_create_data = {}

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_create_multiple_objects_without_permission(self):
        """POST to the bulk-add view must be rejected when no permission is granted."""
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.bulk_create_data),
        }

        # Try POST without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(**request), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_create_multiple_objects_with_permission(self):
        """An unconstrained 'add' permission allows creating bulk_create_count objects."""
        initial_count = self._get_queryset().count()
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.bulk_create_data),
        }

        # Assign non-constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['add'],
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Bulk create objects; a successful creation redirects (302).
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)
        self.assertEqual(initial_count + self.bulk_create_count, self._get_queryset().count())
        # The newest objects (highest pks) should all match the submitted data.
        for instance in self._get_queryset().order_by('-pk')[:self.bulk_create_count]:
            self.assertInstanceEqual(instance, self.bulk_create_data)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_create_multiple_objects_with_constrained_permission(self):
        """A constrained 'add' permission blocks creation until its constraint allows it."""
        initial_count = self._get_queryset().count()
        request = {
            'path': self._get_url('add'),
            'data': post_data(self.bulk_create_data),
        }

        # Assign constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['add'],
            constraints={'pk': 0}  # Dummy constraint to deny all
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Attempt to make the request with unmet constraints: the form is
        # re-rendered (200) and nothing is created.
        self.assertHttpStatus(self.client.post(**request), 200)
        self.assertEqual(self._get_queryset().count(), initial_count)

        # Update the ObjectPermission to allow creation
        obj_perm.constraints = {'pk__gt': 0}  # Dummy constraint to allow all
        obj_perm.save()

        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)
        self.assertEqual(initial_count + self.bulk_create_count, self._get_queryset().count())
        for instance in self._get_queryset().order_by('-pk')[:self.bulk_create_count]:
            self.assertInstanceEqual(instance, self.bulk_create_data)
class BulkImportObjectsViewTestCase(ModelViewTestCase):
    """
    Create multiple instances from imported data.

    :csv_data: A list of CSV-formatted lines (starting with the headers) to be used for bulk object import.
    """
    csv_data = ()

    def _get_csv_data(self):
        # Join the individual CSV lines into the single text blob the import form expects.
        return '\n'.join(self.csv_data)

    def test_bulk_import_objects_without_permission(self):
        """Both GET and POST to the import view are rejected without permission."""
        data = {
            'csv': self._get_csv_data(),
        }

        # Test GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('import')), 403)

        # Try POST without permission
        response = self.client.post(self._get_url('import'), data)
        with disable_warnings('django.request'):
            self.assertHttpStatus(response, 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_import_objects_with_permission(self):
        """A model-level 'add' permission allows importing every CSV row."""
        initial_count = self._get_queryset().count()
        data = {
            'csv': self._get_csv_data(),
        }

        # Assign model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['add']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try GET with model-level permission
        self.assertHttpStatus(self.client.get(self._get_url('import')), 200)

        # Test POST with permission
        self.assertHttpStatus(self.client.post(self._get_url('import'), data), 200)
        # One object per CSV line, minus the header row.
        self.assertEqual(self._get_queryset().count(), initial_count + len(self.csv_data) - 1)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_import_objects_with_constrained_permission(self):
        """A constrained 'add' permission blocks the import until the constraint matches."""
        initial_count = self._get_queryset().count()
        data = {
            'csv': self._get_csv_data(),
        }

        # Assign constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'pk': 0},  # Dummy permission to deny all
            actions=['add']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Attempt to import non-permitted objects; the form re-renders (200)
        # and nothing is created.
        self.assertHttpStatus(self.client.post(self._get_url('import'), data), 200)
        self.assertEqual(self._get_queryset().count(), initial_count)

        # Update permission constraints
        obj_perm.constraints = {'pk__gt': 0}  # Dummy permission to allow all
        obj_perm.save()

        # Import permitted objects
        self.assertHttpStatus(self.client.post(self._get_url('import'), data), 200)
        self.assertEqual(self._get_queryset().count(), initial_count + len(self.csv_data) - 1)
class BulkEditObjectsViewTestCase(ModelViewTestCase):
    """
    Edit multiple instances.

    :bulk_edit_data: A dictionary of data to be used when bulk editing a set of objects. This data should differ
                     from that used for initial object creation within setUpTestData().
    """
    bulk_edit_data = {}

    def test_bulk_edit_objects_without_permission(self):
        """Both GET and POST to the bulk-edit view are rejected without permission."""
        pk_list = self._get_queryset().values_list('pk', flat=True)[:3]
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }

        # Test GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('bulk_edit')), 403)

        # Try POST without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(self._get_url('bulk_edit'), data), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_edit_objects_with_permission(self):
        """A model-level 'change' permission allows bulk-editing the selected objects."""
        pk_list = self._get_queryset().values_list('pk', flat=True)[:3]
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }

        # Append the form data to the request
        data.update(post_data(self.bulk_edit_data))

        # Assign model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try POST with model-level permission
        self.assertHttpStatus(self.client.post(self._get_url('bulk_edit'), data), 302)
        # Every edited object should now reflect the submitted form data.
        # (Removed an unused `enumerate` index from the original loop.)
        for instance in self._get_queryset().filter(pk__in=pk_list):
            self.assertInstanceEqual(instance, self.bulk_edit_data)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_edit_objects_with_constrained_permission(self):
        """A constrained 'change' permission blocks edits that would leave objects non-permitted."""
        pk_list = list(self._get_queryset().values_list('pk', flat=True)[:3])
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }

        # Append the form data to the request
        data.update(post_data(self.bulk_edit_data))

        # Dynamically determine a constraint that will *not* be matched by the updated objects.
        attr_name = list(self.bulk_edit_data.keys())[0]
        field = self.model._meta.get_field(attr_name)
        value = field.value_from_object(self._get_queryset().first())

        # Assign constrained permission
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={attr_name: value},
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Attempt to bulk edit permitted objects into a non-permitted state:
        # the form re-renders (200) instead of redirecting.
        response = self.client.post(self._get_url('bulk_edit'), data)
        self.assertHttpStatus(response, 200)

        # Update permission constraints so any resulting state is permitted.
        obj_perm.constraints = {'pk__gt': 0}
        obj_perm.save()

        # Bulk edit permitted objects
        self.assertHttpStatus(self.client.post(self._get_url('bulk_edit'), data), 302)
        for instance in self._get_queryset().filter(pk__in=pk_list):
            self.assertInstanceEqual(instance, self.bulk_edit_data)
class BulkDeleteObjectsViewTestCase(ModelViewTestCase):
    """
    Delete multiple instances.
    """
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_bulk_delete_objects_without_permission(self):
        selected_pks = self._get_queryset().values_list('pk', flat=True)[:3]
        form_data = {
            'pk': selected_pks,
            'confirm': True,
            '_confirm': True,  # Form button
        }

        # GET must be rejected when no permission has been granted.
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('bulk_delete')), 403)

        # POST must likewise be rejected.
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(self._get_url('bulk_delete'), form_data), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_bulk_delete_objects_with_permission(self):
        selected_pks = self._get_queryset().values_list('pk', flat=True)
        form_data = {
            'pk': selected_pks,
            'confirm': True,
            '_confirm': True,  # Form button
        }

        # Grant an unconstrained delete permission.
        permission = ObjectPermission(name='Test permission', actions=['delete'])
        permission.save()
        permission.users.add(self.user)
        permission.object_types.add(ContentType.objects.get_for_model(self.model))

        # A permitted bulk delete redirects and removes every object.
        self.assertHttpStatus(self.client.post(self._get_url('bulk_delete'), form_data), 302)
        self.assertEqual(self._get_queryset().count(), 0)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_bulk_delete_objects_with_constrained_permission(self):
        initial_count = self._get_queryset().count()
        selected_pks = self._get_queryset().values_list('pk', flat=True)
        form_data = {
            'pk': selected_pks,
            'confirm': True,
            '_confirm': True,  # Form button
        }

        # Grant a delete permission whose constraint matches no object.
        permission = ObjectPermission(
            name='Test permission',
            constraints={'pk': 0},  # Dummy permission to deny all
            actions=['delete']
        )
        permission.save()
        permission.users.add(self.user)
        permission.object_types.add(ContentType.objects.get_for_model(self.model))

        # The request completes (302) but no non-permitted object is deleted.
        self.assertHttpStatus(self.client.post(self._get_url('bulk_delete'), form_data), 302)
        self.assertEqual(self._get_queryset().count(), initial_count)

        # Relax the constraint so every object becomes permitted.
        permission.constraints = {'pk__gt': 0}  # Dummy permission to allow all
        permission.save()

        # Now the bulk delete removes everything.
        self.assertHttpStatus(self.client.post(self._get_url('bulk_delete'), form_data), 302)
        self.assertEqual(self._get_queryset().count(), 0)
class BulkRenameObjectsViewTestCase(ModelViewTestCase):
    """
    Rename multiple instances.
    """
    # Find/replace parameters submitted with every rename request; the regex
    # appends an X to each object's original name.
    rename_data = {
        'find': '^(.*)$',
        'replace': '\\1X',  # Append an X to the original value
        'use_regex': True,
    }

    def test_bulk_rename_objects_without_permission(self):
        """Both GET and POST to the bulk-rename view are rejected without permission."""
        pk_list = self._get_queryset().values_list('pk', flat=True)[:3]
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }
        data.update(self.rename_data)

        # Test GET without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.get(self._get_url('bulk_rename')), 403)

        # Try POST without permission
        with disable_warnings('django.request'):
            self.assertHttpStatus(self.client.post(self._get_url('bulk_rename'), data), 403)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_rename_objects_with_permission(self):
        """A model-level 'change' permission allows renaming the selected objects."""
        # NOTE: building pk_list iterates `objects`, which caches its results,
        # so objects[i].name below still holds the pre-rename value.
        objects = self._get_queryset().all()[:3]
        pk_list = [obj.pk for obj in objects]
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }
        data.update(self.rename_data)

        # Assign model-level permission
        obj_perm = ObjectPermission(
            name='Test permission',
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Try POST with model-level permission
        self.assertHttpStatus(self.client.post(self._get_url('bulk_rename'), data), 302)
        # NOTE(review): assumes filter(pk__in=...) iterates in the same order
        # as `objects` — relies on the model's default ordering; confirm.
        for i, instance in enumerate(self._get_queryset().filter(pk__in=pk_list)):
            self.assertEqual(instance.name, f'{objects[i].name}X')

    @override_settings(EXEMPT_VIEW_PERMISSIONS=['*'])
    def test_bulk_rename_objects_with_constrained_permission(self):
        """A constrained 'change' permission blocks renames into a non-permitted state."""
        objects = self._get_queryset().all()[:3]
        pk_list = [obj.pk for obj in objects]
        data = {
            'pk': pk_list,
            '_apply': True,  # Form button
        }
        data.update(self.rename_data)

        # Assign constrained permission: only names NOT ending in X are
        # permitted, so the rename (which appends X) violates the constraint.
        obj_perm = ObjectPermission(
            name='Test permission',
            constraints={'name__regex': '[^X]$'},
            actions=['change']
        )
        obj_perm.save()
        obj_perm.users.add(self.user)
        obj_perm.object_types.add(ContentType.objects.get_for_model(self.model))

        # Attempt to bulk edit permitted objects into a non-permitted state:
        # the form re-renders (200) instead of redirecting.
        response = self.client.post(self._get_url('bulk_rename'), data)
        self.assertHttpStatus(response, 200)

        # Update permission constraints
        obj_perm.constraints = {'pk__gt': 0}
        obj_perm.save()

        # Bulk rename permitted objects
        self.assertHttpStatus(self.client.post(self._get_url('bulk_rename'), data), 302)
        for i, instance in enumerate(self._get_queryset().filter(pk__in=pk_list)):
            self.assertEqual(instance.name, f'{objects[i].name}X')
class PrimaryObjectViewTestCase(
    GetObjectViewTestCase,
    GetObjectChangelogViewTestCase,
    CreateObjectViewTestCase,
    EditObjectViewTestCase,
    DeleteObjectViewTestCase,
    ListObjectsViewTestCase,
    BulkImportObjectsViewTestCase,
    BulkEditObjectsViewTestCase,
    BulkDeleteObjectsViewTestCase,
):
    """
    TestCase suitable for testing all standard View functions for primary objects
    """
    # Show full diffs on assertion failures; these comparisons can be large.
    maxDiff = None
class OrganizationalObjectViewTestCase(
    GetObjectChangelogViewTestCase,
    CreateObjectViewTestCase,
    EditObjectViewTestCase,
    DeleteObjectViewTestCase,
    ListObjectsViewTestCase,
    BulkImportObjectsViewTestCase,
    BulkDeleteObjectsViewTestCase,
):
    """
    TestCase suitable for all organizational objects
    """
    # Show full diffs on assertion failures; these comparisons can be large.
    maxDiff = None
class DeviceComponentTemplateViewTestCase(
    EditObjectViewTestCase,
    DeleteObjectViewTestCase,
    CreateMultipleObjectsViewTestCase,
    BulkEditObjectsViewTestCase,
    BulkRenameObjectsViewTestCase,
    BulkDeleteObjectsViewTestCase,
):
    """
    TestCase suitable for testing device component template models (ConsolePortTemplates, InterfaceTemplates, etc.)
    """
    # Show full diffs on assertion failures; these comparisons can be large.
    maxDiff = None
class DeviceComponentViewTestCase(
    GetObjectViewTestCase,
    GetObjectChangelogViewTestCase,
    EditObjectViewTestCase,
    DeleteObjectViewTestCase,
    ListObjectsViewTestCase,
    CreateMultipleObjectsViewTestCase,
    BulkImportObjectsViewTestCase,
    BulkEditObjectsViewTestCase,
    BulkRenameObjectsViewTestCase,
    BulkDeleteObjectsViewTestCase,
):
    """
    TestCase suitable for testing device component models (ConsolePorts, Interfaces, etc.)
    """
    # Show full diffs on assertion failures; these comparisons can be large.
    maxDiff = None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/OperationDefinition) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class OperationDefinition(domainresource.DomainResource):
    """ Definition of an operation or a named query.

    A formal computable definition of an operation (on the RESTful interface)
    or a named query (using the search interaction).
    """

    resource_type = "OperationDefinition"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.affectsState = None
        """ Whether content is changed by the operation.
        Type `bool`. """

        self.base = None
        """ Marks this as a profile of the base.
        Type `str`. """

        self.code = None
        """ Name used to invoke the operation.
        Type `str`. """

        self.comment = None
        """ Additional information about use.
        Type `str`. """

        self.contact = None
        """ Contact details for the publisher.
        List of `ContactDetail` items (represented as `dict` in JSON). """

        self.date = None
        """ Date last changed.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.description = None
        """ Natural language description of the operation definition.
        Type `str`. """

        self.experimental = None
        """ For testing purposes, not real usage.
        Type `bool`. """

        self.inputProfile = None
        """ Validation information for in parameters.
        Type `str`. """

        self.instance = None
        """ Invoke on an instance?.
        Type `bool`. """

        self.jurisdiction = None
        """ Intended jurisdiction for operation definition (if applicable).
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.kind = None
        """ operation | query.
        Type `str`. """

        self.name = None
        """ Name for this operation definition (computer friendly).
        Type `str`. """

        self.outputProfile = None
        """ Validation information for out parameters.
        Type `str`. """

        self.overload = None
        """ Define overloaded variants for when generating code.
        List of `OperationDefinitionOverload` items (represented as `dict` in JSON). """

        self.parameter = None
        """ Parameters for the operation/query.
        List of `OperationDefinitionParameter` items (represented as `dict` in JSON). """

        self.publisher = None
        """ Name of the publisher (organization or individual).
        Type `str`. """

        self.purpose = None
        """ Why this operation definition is defined.
        Type `str`. """

        self.resource = None
        """ Types this operation applies to.
        List of `str` items. """

        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """

        self.system = None
        """ Invoke at the system level?.
        Type `bool`. """

        self.title = None
        """ Name for this operation definition (human friendly).
        Type `str`. """

        self.type = None
        """ Invoke at the type level?.
        Type `bool`. """

        self.url = None
        """ Canonical identifier for this operation definition, represented as
        a URI (globally unique).
        Type `str`. """

        self.useContext = None
        """ The context that the content is intended to support.
        List of `UsageContext` items (represented as `dict` in JSON). """

        self.version = None
        """ Business version of the operation definition.
        Type `str`. """

        super(OperationDefinition, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(OperationDefinition, self).elementProperties()
        # Each tuple appears to follow the fhirclient convention:
        # (attribute name, JSON name, type, is_list, "one of many" group, required)
        # — the final flag is True for elements that are mandatory in FHIR.
        js.extend([
            ("affectsState", "affectsState", bool, False, None, False),
            ("base", "base", str, False, None, False),
            ("code", "code", str, False, None, True),
            ("comment", "comment", str, False, None, False),
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("experimental", "experimental", bool, False, None, False),
            ("inputProfile", "inputProfile", str, False, None, False),
            ("instance", "instance", bool, False, None, True),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("kind", "kind", str, False, None, True),
            ("name", "name", str, False, None, True),
            ("outputProfile", "outputProfile", str, False, None, False),
            ("overload", "overload", OperationDefinitionOverload, True, None, False),
            ("parameter", "parameter", OperationDefinitionParameter, True, None, False),
            ("publisher", "publisher", str, False, None, False),
            ("purpose", "purpose", str, False, None, False),
            ("resource", "resource", str, True, None, False),
            ("status", "status", str, False, None, True),
            ("system", "system", bool, False, None, True),
            ("title", "title", str, False, None, False),
            ("type", "type", bool, False, None, True),
            ("url", "url", str, False, None, False),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
            ("version", "version", str, False, None, False),
        ])
        return js
from . import backboneelement
class OperationDefinitionOverload(backboneelement.BackboneElement):
    """ Define overloaded variants for when generating code.

    Defines an appropriate combination of parameters to use when invoking this
    operation, to help code generators when generating overloaded parameter
    sets for this operation.
    """

    resource_type = "OperationDefinitionOverload"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Comments to go on overload (`str`).
        self.comment = None

        # Names of parameters to include in overload (list of `str`).
        self.parameterName = None

        super(OperationDefinitionOverload, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(OperationDefinitionOverload, self).elementProperties()
        properties += [
            ("comment", "comment", str, False, None, False),
            ("parameterName", "parameterName", str, True, None, False),
        ]
        return properties
class OperationDefinitionParameter(backboneelement.BackboneElement):
    """ Parameters for the operation/query.

    The parameters for the operation/query.
    """

    resource_type = "OperationDefinitionParameter"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.binding = None
        """ ValueSet details if this is coded.
        Type `OperationDefinitionParameterBinding` (represented as `dict` in JSON). """

        self.documentation = None
        """ Description of meaning/use.
        Type `str`. """

        self.max = None
        """ Maximum Cardinality (a number or *).
        Type `str`. """

        self.min = None
        """ Minimum Cardinality.
        Type `int`. """

        self.name = None
        """ Name in Parameters.parameter.name or in URL.
        Type `str`. """

        self.part = None
        """ Parts of a nested Parameter.
        List of `OperationDefinitionParameter` items (represented as `dict` in JSON). """

        self.referencedFrom = None
        """ References to this parameter.
        List of `OperationDefinitionParameterReferencedFrom` items (represented as `dict` in JSON). """

        self.searchType = None
        """ number | date | string | token | reference | composite | quantity |
        uri | special.
        Type `str`. """

        self.targetProfile = None
        """ If type is Reference | canonical, allowed targets.
        List of `str` items. """

        self.type = None
        """ What type this parameter has.
        Type `str`. """

        self.use = None
        """ in | out.
        Type `str`. """

        super(OperationDefinitionParameter, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(OperationDefinitionParameter, self).elementProperties()
        # Each tuple appears to follow the fhirclient convention:
        # (attribute name, JSON name, type, is_list, "one of many" group, required)
        # Note that "part" references this class itself: parameters may nest.
        js.extend([
            ("binding", "binding", OperationDefinitionParameterBinding, False, None, False),
            ("documentation", "documentation", str, False, None, False),
            ("max", "max", str, False, None, True),
            ("min", "min", int, False, None, True),
            ("name", "name", str, False, None, True),
            ("part", "part", OperationDefinitionParameter, True, None, False),
            ("referencedFrom", "referencedFrom", OperationDefinitionParameterReferencedFrom, True, None, False),
            ("searchType", "searchType", str, False, None, False),
            ("targetProfile", "targetProfile", str, True, None, False),
            ("type", "type", str, False, None, False),
            ("use", "use", str, False, None, True),
        ])
        return js
class OperationDefinitionParameterBinding(backboneelement.BackboneElement):
    """ ValueSet details if this is coded.

    Binds to a value set if this parameter is coded (code, Coding,
    CodeableConcept).
    """

    resource_type = "OperationDefinitionParameterBinding"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # required | extensible | preferred | example (`str`).
        self.strength = None

        # Source of value set (`str`).
        self.valueSet = None

        super(OperationDefinitionParameterBinding, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(OperationDefinitionParameterBinding, self).elementProperties()
        for definition in (
            ("strength", "strength", str, False, None, True),
            ("valueSet", "valueSet", str, False, None, True),
        ):
            properties.append(definition)
        return properties
class OperationDefinitionParameterReferencedFrom(backboneelement.BackboneElement):
    """ References to this parameter.

    Identifies other resource parameters within the operation invocation that
    are expected to resolve to this resource.
    """

    resource_type = "OperationDefinitionParameterReferencedFrom"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Referencing parameter (`str`).
        self.source = None

        # Element id of reference (`str`).
        self.sourceId = None

        super(OperationDefinitionParameterReferencedFrom, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        properties = super(OperationDefinitionParameterReferencedFrom, self).elementProperties()
        properties += [
            ("source", "source", str, False, None, True),
            ("sourceId", "sourceId", str, False, None, False),
        ]
        return properties
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - logistic regression subcommand processing dispatching
"""
import sys
import os
import bigml.api
import bigmler.utils as u
import bigmler.resourcesapi.common as r
import bigmler.resourcesapi.logistic_regressions as rlr
import bigmler.resourcesapi.batch_predictions as rbp
import bigmler.pre_model_steps as pms
import bigmler.processing.args as a
import bigmler.processing.logisticregressions as plr
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
from bigmler.resourcesapi.datasets import set_basic_dataset_args
from bigmler.defaults import DEFAULTS_FILE
from bigmler.sl_prediction import prediction, remote_prediction
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_context
from bigmler.evaluation import evaluate
from bigmler.dispatcher import (SESSIONS_LOG,
clear_log_files, get_test_dataset,
get_objective_id)
# File where the issued bigmler command is logged for this subcommand
COMMAND_LOG = ".bigmler_logistic_regression"

# File tracking the stack of output directories used by this subcommand
DIRS_LOG = ".bigmler_logistic_regression_dir_stack"

# Log files removed when the user passes --clear-logs
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]

# Query string for a minimal resource representation — presumably appended
# to API GET requests elsewhere in the package; not used in this chunk.
MINIMUM_MODEL = "full=false"

# Default file name for prediction output
DEFAULT_OUTPUT = "predictions.csv"

# Default settings handed to get_context() for this subcommand
SETTINGS = {
    "command_log": COMMAND_LOG,
    "sessions_log": SESSIONS_LOG,
    "dirs_log": DIRS_LOG,
    "default_output": DEFAULT_OUTPUT,
    "defaults_file": DEFAULTS_FILE}
def logistic_regression_dispatcher(args=None):
    """Parses command line and calls the different processing functions

    :param args: Optional list of command-line arguments. Defaults to the
        current ``sys.argv[1:]``, resolved at call time. (The previous
        ``args=sys.argv[1:]`` default was evaluated once at import time,
        freezing whatever argv held when the module was first loaded.)
    """
    if args is None:
        args = sys.argv[1:]

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)
    if '--evaluate' in args:
        # Evaluations are written under a different default output name
        settings.update({"default_output": "evaluation"})

    command_args, _, api, session_file, _ = get_context(args, settings)

    # Selects the action to perform: train/test/export produce output;
    # anything else only logs the session separator below.
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    Pipeline: source -> dataset -> logistic regression(s) ->
    optional (local or remote) predictions -> optional evaluation.

    Args:
        api: authenticated BigML API connection.
        args: parsed command-line options (see bigmler.processing.args).
    """
    logistic_regression = None
    logistic_regressions = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    logistic_regression_ids = args.logistic_regression_ids_
    output = args.predictions
    # there's only one logistic regression to be generated at present
    args.max_parallel_logistic_regressions = 1
    # logistic regressions cannot be published yet.
    args.public_logistic_regression = False

    # It is compulsory to have a description to publish either datasets or
    # logistic regressions
    if (not args.description_ and (args.public_logistic_regression or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties

    if datasets:
        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    if args.logistic_file:
        # logistic regression is retrieved from the contents of the given local
        # JSON file
        logistic_regression, csv_properties, fields = u.read_local_resource(
            args.logistic_file,
            csv_properties=csv_properties)
        logistic_regressions = [logistic_regression]
        logistic_regression_ids = [logistic_regression['resource']]
    else:
        # logistic regression is retrieved from the remote object
        logistic_regressions, logistic_regression_ids, resume = \
            plr.logistic_regressions_processing( \
            datasets, logistic_regressions, logistic_regression_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)
    if logistic_regressions:
        logistic_regression = logistic_regressions[0]

    # We update the logistic regression's public state if needed
    if logistic_regression:
        # A plain string here is a resource id; fetch only the detail level
        # actually needed (minimal, all fields, or default).
        if isinstance(logistic_regression, str):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            logistic_regression = u.check_resource(logistic_regression,
                                                   api.get_logistic_regression,
                                                   query_string=query_string)
            logistic_regressions[0] = logistic_regression
        if (args.public_logistic_regression or
                (args.shared_flag and r.shared_changed(args.shared,
                                                       logistic_regression))):
            logistic_regression_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     logistic_regression):
                logistic_regression_args.update(shared=args.shared)
            if args.public_logistic_regression:
                logistic_regression_args.update( \
                    rlr.set_publish_logistic_regression_args(args))
            if logistic_regression_args:
                logistic_regression = rlr.update_logistic_regression( \
                    logistic_regression, logistic_regression_args, args,
                    api=api, path=path, \
                    session_file=session_file)
                logistic_regressions[0] = logistic_regression

    # We get the fields of the logistic_regression if we haven't got
    # them yet and need them
    if logistic_regression and (args.test_set or args.export_fields):
        fields = plr.get_logistic_fields( \
            logistic_regression, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If predicting
    if logistic_regressions and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            # The test dataset has no objective field when batch predicting.
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_prediction_args = rbp.set_batch_prediction_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_prediction(logistic_regression, test_dataset, \
                batch_prediction_args, args, \
                api, resume, prediction_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            prediction(logistic_regressions, fields, args,
                       session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
        if args.test_dataset_ids and args.dataset_off:
            # Evaluate the models with the corresponding test datasets.
            test_dataset_id = bigml.api.get_dataset_id( \
                args.test_dataset_ids[0])
            test_dataset = api.check_resource(test_dataset_id)
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            resume = evaluate(logistic_regressions, args.test_dataset_ids, api,
                              args, resume,
                              fields=fields, dataset_fields=test_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)
        else:
            dataset = datasets[0]
            if args.test_split > 0 or args.has_test_datasets_:
                dataset = test_dataset
            dataset = u.check_resource(dataset, api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate(logistic_regressions, [dataset], api,
                              args, resume,
                              fields=fields, dataset_fields=dataset_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <discoursegraphs.programming@arne.cl>
from cStringIO import StringIO
import os
import sys
import lxml
import pytest
import discoursegraphs as dg
from discoursegraphs.readwrite.exportxml import (
ExportXMLCorpus, ExportXMLDocumentGraph)
class Capturing(list):
    """Capture everything written to STDOUT inside a ``with`` block.

    The captured output becomes the list's contents, one element per
    line. Adapted from http://stackoverflow.com/a/16571630
    """
    def __enter__(self):
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *args):
        captured = self._stringio.getvalue()
        self.extend(captured.splitlines())
        sys.stdout = self._stdout
# Expected line-by-line output of dg.info() for document graph 'text_0'.
text_0_stats = ['Name: text_0',
                'Type: ExportXMLDocumentGraph',
                'Number of nodes: 1592',
                'Number of edges: 1685',
                'Average in degree: 1.0584',
                'Average out degree: 1.0584 ',
                '',
                'Node statistics',
                '===============',
                '',
                'number of nodes with layers',
                '\texportxml - 1592',
                '\texportxml:syntax - 837',
                '\texportxml:token - 675',
                '\texportxml:markable - 87',
                '\texportxml:coreferential - 46',
                '\texportxml:ne - 44',
                '\texportxml:anaphoric - 25',
                '\texportxml:expletive - 5',
                '\texportxml:connective - 1',
                '',
                'number of nodes with attributes',
                '\tlayers - 1592',
                '\tlabel - 1556',
                '\texportxml:func - 1512',
                '\texportxml:parent - 1365',
                '\texportxml:cat - 837',
                '\texportxml:form - 675',
                '\texportxml:lemma - 675',
                '\texportxml:pos - 675',
                '\texportxml:deprel - 675',
                '\texportxml:token - 675',
                '\texportxml:dephead - 530',
                '\texportxml:morph - 447',
                '\trelation - 76',
                '\texportxml:type - 44',
                '\ttokens - 35',
                '\texportxml:comment - 3',
                '\tconnective - 1',
                '\tmetadata - 1',
                '',
                'Edge statistics',
                '===============',
                '',
                'number of edges with layers',
                '\texportxml - 1685',
                '\texportxml:coreference - 76',
                '\texportxml:ne - 58',
                '\texportxml:coreferential - 46',
                '\texportxml:anaphoric - 30',
                '\texportxml:secedge - 4',
                '',
                'number of edges with attributes',
                '\tlayers - 1685',
                '\tedge_type - 1685',
                '\tlabel - 138',
                '',
                'most common source edges',
                '\ttext_0 - 35',
                '\ts33_541 - 7',
                '\ts6_538 - 7',
                '\ts6_536 - 7',
                '\ts7_544 - 7',
                '',
                'most common target edges',
                '\ts33_527 - 3',
                '\ts33_515 - 3',
                '\ts21_532 - 3',
                '\ts19_519 - 3',
                '\ts25_505 - 3']
# Expected line-by-line output of dg.info() for document graph 'text_9'.
text_9_stats = ['Name: text_9',
                'Type: ExportXMLDocumentGraph',
                'Number of nodes: 1369',
                'Number of edges: 2431',
                'Average in degree: 1.7757',
                'Average out degree: 1.7757 ',
                '',
                'Node statistics',
                '===============',
                '',
                'number of nodes with layers',
                '\texportxml - 1369',
                '\texportxml:syntax - 703',
                '\texportxml:token - 553',
                '\texportxml:edu - 49',
                '\texportxml:relation - 34',
                '\texportxml:discourse - 34',
                '\texportxml:markable - 30',
                '\texportxml:edu:range - 13',
                '\texportxml:anaphoric - 13',
                '\texportxml:ne - 13',
                '\texportxml:topic - 5',
                '\texportxml:coreferential - 4',
                '\texportxml:expletive - 2',
                '',
                'number of nodes with attributes',
                '\tlayers - 1369',
                '\tlabel - 1269',
                '\texportxml:func - 1256',
                '\texportxml:parent - 1128',
                '\texportxml:cat - 703',
                '\texportxml:token - 553',
                '\texportxml:pos - 553',
                '\texportxml:deprel - 553',
                '\texportxml:form - 553',
                '\texportxml:lemma - 550',
                '\texportxml:dephead - 429',
                '\texportxml:morph - 335',
                '\ttokens - 86',
                '\texportxml:relation - 34',
                '\texportxml:arg2 - 34',
                '\texportxml:marking - 34',
                '\texportxml:span - 24',
                '\trelation - 19',
                '\texportxml:type - 13',
                '\tdescription - 5',
                '\tmetadata - 1',
                '\texportxml:comment - 1',
                '',
                'Edge statistics',
                '===============',
                '',
                'number of edges with layers',
                '\texportxml - 2431',
                '\texportxml:topic - 524',
                '\texportxml:edu - 511',
                '\texportxml:relation - 36',
                '\texportxml:discourse - 36',
                '\texportxml:edu:range - 26',
                '\texportxml:ne - 23',
                '\texportxml:coreference - 22',
                '\texportxml:anaphoric - 18',
                '\texportxml:coreferential - 4',
                '\texportxml:secedge - 1',
                '',
                'number of edges with attributes',
                '\tlayers - 2431',
                '\tedge_type - 2431',
                '\tlabel - 82',
                '\trelation - 36',
                '',
                'most common source edges',
                '\ttopic_9_3 - 238',
                '\ttopic_9_2 - 131',
                '\ttopic_9_4 - 68',
                '\ttopic_9_1 - 45',
                '\ttopic_9_0 - 42',
                '',
                'most common target edges',
                '\ts133_14 - 4',
                '\ts132_4 - 4',
                '\ts132_9 - 4',
                '\ts134_9 - 4',
                '\ts154_7 - 4']
# Expected line-by-line output of dg.info() for document graph 'text_22'.
text_22_stats = ['Name: text_22',
                 'Type: ExportXMLDocumentGraph',
                 'Number of nodes: 1331',
                 'Number of edges: 1386',
                 'Average in degree: 1.0413',
                 'Average out degree: 1.0413 ',
                 '',
                 'Node statistics',
                 '===============',
                 '',
                 'number of nodes with layers',
                 '\texportxml - 1331',
                 '\texportxml:syntax - 684',
                 '\texportxml:token - 552',
                 '\texportxml:markable - 62',
                 '\texportxml:ne - 58',
                 '\texportxml:coreferential - 39',
                 '\texportxml:anaphoric - 6',
                 '\texportxml:inherent_reflexive - 3',
                 '\texportxml:split_antecedent - 2',
                 '\texportxml:relation - 1',
                 '\texportxml:targetspan - 1',
                 '\texportxml:expletive - 1',
                 '',
                 'number of nodes with attributes',
                 '\tlayers - 1331',
                 '\tlabel - 1294',
                 '\texportxml:func - 1236',
                 '\texportxml:parent - 1143',
                 '\texportxml:cat - 684',
                 '\texportxml:form - 552',
                 '\texportxml:pos - 552',
                 '\texportxml:deprel - 552',
                 '\texportxml:token - 552',
                 '\texportxml:lemma - 550',
                 '\texportxml:dephead - 459',
                 '\texportxml:morph - 381',
                 '\texportxml:type - 58',
                 '\trelation - 49',
                 '\ttokens - 35',
                 '\texportxml:span - 9',
                 '\texportxml:comment - 1',
                 '\tmetadata - 1',
                 '',
                 'Edge statistics',
                 '===============',
                 '',
                 'number of edges with layers',
                 '\texportxml - 1386',
                 '\texportxml:ne - 65',
                 '\texportxml:coreference - 48',
                 '\texportxml:coreferential - 39',
                 '\texportxml:anaphoric - 8',
                 '\texportxml:split_antecedent - 3',
                 '\texportxml:splitrelation - 1',
                 '',
                 'number of edges with attributes',
                 '\tlayers - 1386',
                 '\tedge_type - 1386',
                 '\tlabel - 112',
                 '',
                 'most common source edges',
                 '\ttext_22 - 35',
                 '\ts387_534 - 6',
                 '\ts379_533 - 6',
                 '\ts385_530 - 6',
                 '\ts374_525 - 5',
                 '',
                 'most common target edges',
                 '\ts381_503 - 3',
                 '\ts381_10 - 3',
                 '\ts374_507 - 3',
                 '\ts378_510 - 3',
                 '\ts369_3 - 2']
def test_read_exportxml():
    """An ExportXML file can be parsed with the expected node/edge attributes."""
    corpus_path = os.path.join(dg.DATA_ROOT_DIR, 'exportxml-example.xml')
    corpus = dg.read_exportxml(corpus_path)
    assert isinstance(corpus, dg.readwrite.exportxml.ExportXMLCorpus)
    assert len(corpus) == 3

    collected_stats = []
    for graph in corpus:
        assert isinstance(graph, dg.readwrite.exportxml.ExportXMLDocumentGraph)
        with Capturing() as captured:
            dg.info(graph)
        collected_stats.append(captured)
    expected = [text_0_stats, text_9_stats, text_22_stats]
    assert collected_stats == expected

    # In debug mode the reader yields the raw <text> elements instead of
    # document graphs.
    debug_corpus = dg.read_exportxml(corpus_path, debug=True)
    first_elem = next(debug_corpus)
    assert isinstance(first_elem, lxml.etree._Element)
    assert first_elem.tag == 'text'
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import copy
import json
import logging
import os
import time
import distutils.util
import jmespath
import requests
from c7n_azure.constants import (
AUTH_TYPE_MSI,
AUTH_TYPE_UAI,
AUTH_TYPE_EMBED,
ENV_CUSTODIAN_DISABLE_SSL_CERT_VERIFICATION,
FUNCTION_EVENT_TRIGGER_MODE,
FUNCTION_TIME_TRIGGER_MODE,
FUNCTION_HOST_CONFIG,
FUNCTION_EXTENSION_BUNDLE_CONFIG)
from c7n_azure.session import Session
from c7n.mu import PythonPackageArchive
from c7n.utils import local_session
class AzurePythonPackageArchive(PythonPackageArchive):
    """Package archive that stamps every entry with one fixed timestamp."""

    def __init__(self, modules=(), cache_file=None):
        super(AzurePythonPackageArchive, self).__init__(modules, cache_file)
        # Snapshot taken once at construction and reused for all entries.
        self.package_time = time.gmtime()

    def create_zinfo(self, file):
        """
        In Dedicated App Service Plans - Functions are updated via KuduSync.
        KuduSync uses the modified time and file size to determine if a file
        has changed, so every zip entry carries the archive-wide timestamp.
        """
        zinfo = super(AzurePythonPackageArchive, self).create_zinfo(file)
        zinfo.date_time = self.package_time[:6]
        return zinfo
class FunctionPackage:
    """Builds and deploys the zip package for a Custodian Azure Function App.

    The package holds one folder per target subscription (function entry
    point, bindings config, auth settings) plus the shared host.json and
    requirements.txt, and is pushed through the Kudu zip-deploy API.
    """

    log = logging.getLogger('custodian.azure.function_package.FunctionPackage')

    def __init__(self, name, function_path=None, target_sub_ids=None, cache_override_path=None):
        self.pkg = None
        self.name = name
        # Default to the function.py shipped next to this module.
        self.function_path = function_path or os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'function.py')
        self.cache_override_path = cache_override_path
        self.enable_ssl_cert = not distutils.util.strtobool(
            os.environ.get(ENV_CUSTODIAN_DISABLE_SSL_CERT_VERIFICATION, 'no'))

        if target_sub_ids is not None:
            self.target_sub_ids = target_sub_ids
        else:
            # A single None entry means "use the session's default sub".
            self.target_sub_ids = [None]

        if not self.enable_ssl_cert:
            self.log.warning('SSL Certificate Validation is disabled')

    def _add_functions_required_files(
            self, policy_data, requirements, queue_name=None, identity=None):
        """Add per-subscription function folders plus shared package files."""
        s = local_session(Session)
        self.pkg.add_contents(dest='requirements.txt',
                              contents=requirements)
        for target_sub_id in self.target_sub_ids:
            name = self.name + ("_" + target_sub_id if target_sub_id else "")

            # generate and add auth if using embedded service principal
            identity = (identity
                        or jmespath.search(
                            'mode."provision-options".identity', policy_data)
                        or {'type': AUTH_TYPE_EMBED})
            if identity['type'] == AUTH_TYPE_EMBED:
                auth_contents = s.get_functions_auth_string(target_sub_id)
            elif identity['type'] == AUTH_TYPE_MSI:
                auth_contents = json.dumps({
                    'use_msi': True, 'subscription_id': target_sub_id,
                    'tenant_id': s.get_tenant_id()})
            elif identity['type'] == AUTH_TYPE_UAI:
                auth_contents = json.dumps({
                    'use_msi': True, 'subscription_id': target_sub_id,
                    'client_id': identity['client_id'],
                    'tenant_id': s.get_tenant_id()})
            else:
                # Fail fast with a clear message; previously an unknown type
                # crashed below with UnboundLocalError on auth_contents.
                raise ValueError(
                    "Unsupported function identity type: %s"
                    % identity['type'])
            self.pkg.add_contents(dest=name + '/auth.json',
                                  contents=auth_contents)

            self.pkg.add_file(self.function_path,
                              dest=name + '/function.py')

            self.pkg.add_contents(dest=name + '/__init__.py', contents='')

            if policy_data:
                self.pkg.add_contents(
                    dest=name + '/function.json',
                    contents=self.get_function_config(policy_data, queue_name))
                self.pkg.add_contents(
                    dest=name + '/config.json',
                    contents=json.dumps({'policies': [policy_data]}, indent=2))
                self._add_host_config(policy_data['mode']['type'])
            else:
                self._add_host_config(None)

    def _add_host_config(self, mode):
        """Write host.json; event-triggered mode needs the extension bundle."""
        config = copy.deepcopy(FUNCTION_HOST_CONFIG)
        if mode == FUNCTION_EVENT_TRIGGER_MODE:
            config['extensionBundle'] = FUNCTION_EXTENSION_BUNDLE_CONFIG
        self.pkg.add_contents(dest='host.json', contents=json.dumps(config))

    def get_function_config(self, policy, queue_name=None):
        """Return the function.json bindings for the policy's trigger mode.

        :param policy: policy data dict; its mode.type selects the binding.
        :param queue_name: storage queue to bind for event-triggered mode.
        :return: the function.json contents as a JSON string.
        """
        config = \
            {
                "scriptFile": "function.py",
                "bindings": [{
                    "direction": "in"
                }]
            }

        mode_type = policy['mode']['type']
        binding = config['bindings'][0]

        if mode_type == FUNCTION_TIME_TRIGGER_MODE:
            binding['type'] = 'timerTrigger'
            binding['name'] = 'input'
            binding['schedule'] = policy['mode']['schedule']

        elif mode_type == FUNCTION_EVENT_TRIGGER_MODE:
            binding['type'] = 'queueTrigger'
            binding['connection'] = 'AzureWebJobsStorage'
            binding['name'] = 'input'
            binding['queueName'] = queue_name

        else:
            self.log.error("Mode not yet supported for Azure functions (%s)"
                           % mode_type)

        return json.dumps(config, indent=2)

    @property
    def cache_folder(self):
        """Directory used to cache built package archives."""
        if self.cache_override_path:
            return self.cache_override_path

        c7n_azure_root = os.path.dirname(__file__)
        return os.path.join(c7n_azure_root, 'cache')

    def build(self, policy, modules, requirements, queue_name=None, identity=None):
        """Assemble the deployment archive for the given policy."""
        self.pkg = AzurePythonPackageArchive()

        # Package names use '-', module folders use '_'.
        self.pkg.add_modules(None,
                             [m.replace('-', '_') for m in modules])

        # add config and policy
        self._add_functions_required_files(policy, requirements, queue_name, identity)

    def wait_for_status(self, deployment_creds, retries=10, delay=15):
        """Poll the SCM deployments endpoint; True once it responds OK."""
        for r in range(retries):
            if self.status(deployment_creds):
                return True
            else:
                self.log.info('(%s/%s) Will retry Function App status check in %s seconds...'
                              % (r + 1, retries, delay))
                time.sleep(delay)
        return False

    def status(self, deployment_creds):
        """Return True when the Kudu deployments API answers with HTTP 200."""
        status_url = '%s/api/deployments' % deployment_creds.scm_uri
        r = requests.get(status_url, verify=self.enable_ssl_cert)

        if r.status_code != 200:
            self.log.error("Application service returned an error.\n%s\n%s"
                           % (r.status_code, r.text))
            return False
        return True

    def publish(self, deployment_creds):
        """Zip-deploy the built package to the Function App via Kudu."""
        self.close()

        # update perms of the package
        os.chmod(self.pkg.path, 0o0644)

        zip_api_url = '%s/api/zipdeploy?isAsync=true&synctriggers=true' % deployment_creds.scm_uri
        headers = {'content-type': 'application/octet-stream'}
        self.log.info("Publishing Function package from %s" % self.pkg.path)

        zip_file = self.pkg.get_bytes()
        try:
            r = requests.post(zip_api_url,
                              data=zip_file,
                              headers=headers,
                              timeout=300,
                              verify=self.enable_ssl_cert)
        except requests.exceptions.ReadTimeout:
            self.log.error("Your Function App deployment timed out after 5 minutes. Try again.")
            # Re-raise: `r` was never bound, so falling through would mask
            # the timeout with an UnboundLocalError on raise_for_status().
            raise
        r.raise_for_status()

        self.log.info("Function publish result: %s" % r.status_code)

    def close(self):
        """Finalize the underlying zip archive."""
        self.pkg.close()
| |
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
from __future__ import with_statement
import sys
import os
import argparse
import time
from VistAMenuUtil import VistAMenuUtil
from LoggerManager import logger, initConsoleLogging, getTempLogFile
from VistATestClient import VistATestClientFactory, createTestClientArgParser
from VistAPackageInfoFetcher import findChoiceNumber
def getBoxVolPair(vistAClient):
  """Return the BOX:VOLUME pair reported by GETENV^%ZOSV on the VistA host."""
  conn = vistAClient.getConnection()
  vistAClient.waitForPrompt()
  conn.send("D GETENV^%ZOSV W Y\r")
  vistAClient.waitForPrompt()
  # The BOX-VOLUME pair is the last '^'-piece of the command output.
  rawOutput = conn.before
  boxVolPair = rawOutput.split('^')[-1].rstrip(' \r\n')
  conn.send('\r')
  return boxVolPair
class VistATaskmanUtil(object):
  """ Utility to drive VistA's Taskman through its roll-and-scroll menus.

  The TASKMAN_STATUS_* constants enumerate the Taskman states reported by
  the 'Monitor Taskman' (MTM) option.
  """
  TASKMAN_STATUS_UNKNOWN = -1
  TASKMAN_STATUS_RUNNING_CURRENT = 0
  TASKMAN_STATUS_RUNNING_BEHIND = 1
  TASKMAN_STATUS_WAIT = 2
  TASKMAN_STATUS_SHUTDOWN = 3
  TASKMAN_STATUS_ERROR_STATE = 4
  TASKMAN_STATUS_LAST = 5
  def __init__(self):
    pass
  def verifyTaskmanSiteParameter(self, vistAClient, autoFix=True):
    # Compare the TASKMAN SITE PARAMETERS BOX-VOLUME pair against the
    # environment reported by GETENV^%ZOSV; optionally fix a mismatch.
    # Returns True when the parameter matches (or was auto-fixed).
    retValue = True
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    boxVolPair = getBoxVolPair(vistAClient)
    logger.debug("Box:Vol Pair is [%s] " % boxVolPair)
    menuUtil.gotoTaskmanEditParamMenu(vistAClient)
    connection.send("Site Parameters Edit\r")
    connection.expect("Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR: ")
    # '?' lists the currently configured BOX-VOLUME pairs.
    connection.send("?\r")
    connection.expect("Answer with TASKMAN SITE PARAMETERS BOX-VOLUME PAIR.*?:")
    connection.expect("You may enter a new TASKMAN SITE PARAMETERS")
    curBoxVol = connection.before.strip(' \r\n')
    curBoxVol = [x.strip(' ') for x in curBoxVol.split('\r\n')]
    logger.debug("Box:Vol Pair is [%s] " % curBoxVol)
    if boxVolPair not in curBoxVol :
      logger.error("taskman site parameter mismatch, current:[%s], correct:[%s]" %
                   (curBoxVol, boxVolPair))
      if autoFix:
        self.__fixTaskmanSiteParameter__(connection, curBoxVol[0], boxVolPair)
      else:
        retValue = False
    connection.expect("Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR: ")
    connection.send('\r')
    menuUtil.exitTaskmanEditParamMenu(vistAClient)
    return retValue
  def getTaskmanStatus(self, vistAClient):
    # Query the current Taskman status via the MTM (Monitor Taskman) option.
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("MTM\r") # Monitor Taskman
    curStatus = self.__getTaskmanStatus__(connection)
    connection.send("^\r")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
    return curStatus
  @staticmethod
  def isTaskmanRunningCurrent(status):
    return status == VistATaskmanUtil.TASKMAN_STATUS_RUNNING_CURRENT
  @staticmethod
  def isTaskmanInWaitState(status):
    return status == VistATaskmanUtil.TASKMAN_STATUS_WAIT
  def waitTaskmanToCurrent(self, vistAClient, timeOut=120):
    # Poll the MTM monitor (once per second) until Taskman reports
    # "current" or timeOut seconds have elapsed.
    DEFAULT_POLL_INTERVAL = 1 # 1 seconds
    MaxRetry = timeOut/DEFAULT_POLL_INTERVAL
    startRetry = 0
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("MTM\r") # Monitor Taskman
    while startRetry < MaxRetry:
      curStatus = self.__getTaskmanStatus__(connection)
      if self.isTaskmanRunningCurrent(curStatus):
        break;
      else:
        startRetry += 1
        time.sleep(DEFAULT_POLL_INTERVAL)
        # <CR> refreshes the monitor display for the next poll.
        connection.send("\r")
    if startRetry >= MaxRetry:
      logger.error("Time out while waiting Taskman to Current")
    connection.send("^\r")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
  def stopTaskman(self, vistAClient, shutdownSubMgrs=True,
                  shutdownActJobs=True):
    # Stop Taskman, optionally shutting down submanagers and active jobs.
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("Stop Task Manager\r")
    connection.expect("Are you sure you want to stop TaskMan\? ")
    connection.send("YES\r")
    connection.expect("Should active submanagers shut down after finishing their current tasks\? ")
    if shutdownSubMgrs:
      connection.send("YES\r")
    else:
      connection.send("NO\r")
    connection.expect("Should active jobs be signaled to stop\? ")
    if shutdownActJobs:
      connection.send("YES\r")
    else:
      connection.send("NO\r")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
    logger.info("Wait 30 seconds for Taskman to stop")
    time.sleep(30)
  def placeTaskmanToWait(self, vistAClient, shutdownSubMgrs=True):
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("Place Taskman in a WAIT State\r")
    connection.expect("Should active submanagers shut down after finishing their current tasks\? ")
    if shutdownSubMgrs:
      connection.send("YES\r")
    else:
      connection.send("NO\r")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
    logger.info("Wait 10 seconds for Taskman to wait state")
    time.sleep(10)
  def removeTaskmanFromWait(self, vistAClient):
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("Remove Taskman from WAIT State\r")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
    logger.info("Wait 10 seconds for Taskman to start")
    time.sleep(10)
  def shutdownAllTasks(self, vistAClient):
    # Stop the background filers first, then Taskman itself.
    self.stopMailManBackgroundFiler(vistAClient)
    self.stopHL7BackgroundFiler(vistAClient)
    self.stopTaskman(vistAClient)
  def stopMailManBackgroundFiler(self, vistAClient):
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoMailmanLocalDeliveryMgrMenu(vistAClient)
    connection.send("STOP background filer\r")
    connection.expect("Are you sure you want the Background Filers to stop delivering mail\? ")
    connection.send("YES\r")
    menuUtil.exitMailmanLocalDeliveryMgrMenu(vistAClient)
    logger.info("Wait 30 seconds for Mailman backgroud filer to stop")
    time.sleep(30)
  def stopHL7BackgroundFiler(self, vistAClient):
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoHL7FilerLinkMgrMenu(vistAClient)
    connection.send("Stop All Messaging Background Processes\r")
    connection.expect("Okay to shut down all Links and Filers\? ")
    connection.send("Yes\r")
    menuUtil.exitHL7FilerLinkMgrMenu(vistAClient)
    logger.info("Wait 30 seconds for HL7 backgroud filer to stop")
    time.sleep(30)
  """ Start taskman, it will not restart taskman if it is already started """
  def startTaskman(self, vistAClient, waitToCurrent=True):
    self.verifyTaskmanSiteParameter(vistAClient)
    connection = vistAClient.getConnection()
    menuUtil = VistAMenuUtil(duz=1)
    menuUtil.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("Restart Task Manager\r")
    # "ANOTHER TASKMAN" means one is already running: answer NO (no restart).
    index = connection.expect(["ARE YOU SURE YOU WANT TO RESTART ANOTHER TASKMAN\?",
                               "ARE YOU SURE YOU WANT TO RESTART TASKMAN\?"])
    if index == 0:
      connection.send("NO\r")
    elif index == 1:
      connection.send("YES\r")
      connection.expect("Restarting...TaskMan restarted\!")
    menuUtil.exitTaskmanMgrUtilMenu(vistAClient)
    curStatus = self.getTaskmanStatus(vistAClient)
    if self.isTaskmanInWaitState(curStatus):
      self.removeTaskmanFromWait(vistAClient)
    if waitToCurrent:
      if not self.isTaskmanRunningCurrent(curStatus):
        self.waitTaskmanToCurrent(vistAClient)
  """
    Internal Implementation
  """
  """ Fixed the BOX-VOLUME Pair """
  def __fixTaskmanSiteParameter__(self, connection, curBoxVol, boxVol):
    # Select the mismatched entry (resolving any CHOOSE disambiguation
    # prompts) and overwrite its BOX-VOLUME PAIR with the correct value.
    connection.expect("Select TASKMAN SITE PARAMETERS BOX-VOLUME PAIR: ")
    connection.send("%s\r" % curBoxVol)
    while True:
      index = connection.expect(["BOX-VOLUME PAIR: %s//" % curBoxVol,
                                 "CHOOSE [0-9]+-[0-9]+"])
      if index == 0:
        break
      else:
        choice = findChoiceNumber(connection.before, curBoxVol)
        if choice:
          connection.send('%s\r' % choice)
        else:
          connection.send('\r') # no match continue
    connection.send("%s\r" % boxVol)
    connection.expect(["//", ': '])
    connection.send("^\r")
  def __getTaskmanStatus__(self, connection):
    # Scrape the MTM monitor screen: the summary line between
    # "Taskman is" and "Checking the Status List:" plus the detailed
    # status list, then map both to a TASKMAN_STATUS_* value.
    connection.expect("Checking Taskman. ")
    connection.expect("Taskman is ")
    connection.expect("Checking the Status List:")
    statusString = connection.before.strip(' \r\n')
    logger.debug("Status String is %s" % statusString)
    connection.expect("Node weight status time \$J")
    connection.expect("Checking the Schedule List:")
    detailedStatus = connection.before.strip(' \r\n')
    logger.debug("Detailed Status String is %s" % detailedStatus)
    connection.expect("Enter monitor action: UPDATE//")
    return self.__taskmanStatusStringToEnum__(statusString, detailedStatus)
  """ map taskman status to enum """
  def __taskmanStatusStringToEnum__(self, statusString, detailedStatus):
    if ( (statusString == "shutting down not running.." or
          statusString == "not running..") and
        detailedStatus == "The Status List is empty."):
      return self.TASKMAN_STATUS_SHUTDOWN
    if (statusString == "current.." and
        detailedStatus.find("RUN") >= 0):
      return self.TASKMAN_STATUS_RUNNING_CURRENT
    if (detailedStatus.find("WAIT") >=0 and
        detailedStatus.find("Taskman Waiting") >=0):
      return self.TASKMAN_STATUS_WAIT
    return self.TASKMAN_STATUS_UNKNOWN
# Default name for the log file capturing the VistA terminal session.
DEFAULT_OUTPUT_LOG_FILE_NAME = "TaskmanUtil.log"
def main():
  # Command-line entry point: parse the test-client options plus the
  # requested Taskman action and dispatch to the matching utility method.
  # NOTE(review): uses Python 2 print statements ("print result") -- this
  # script is not Python 3 compatible.
  import logging
  initConsoleLogging(logging.INFO)
  testClientParser = createTestClientArgParser()
  parser = argparse.ArgumentParser(description='VistA Taskman Utilities',
                                   parents=[testClientParser])
  parser.add_argument('-a', '--action', required=True,
                      choices=['Start', 'Stop', 'Shutdown'],
                      help='Start:Start Taskman, Stop:Stop Taskman, Shutdown:Shutdown all tasks')
  result = parser.parse_args();
  print result
  """ create the VistATestClient"""
  testClient = VistATestClientFactory.createVistATestClientWithArgs(result)
  assert testClient
  with testClient as vistAClient:
    logFilename = getTempLogFile(DEFAULT_OUTPUT_LOG_FILE_NAME)
    print "Log File is %s" % logFilename
    vistAClient.setLogFile(logFilename)
    taskmanUtil = VistATaskmanUtil()
    # Map the -a/--action choice to the corresponding utility method.
    actionMap = {"Start": taskmanUtil.startTaskman,
                 "Stop": taskmanUtil.stopTaskman,
                 "Shutdown": taskmanUtil.shutdownAllTasks}
    actionMap[result.action](vistAClient)

if __name__ == '__main__':
  main()
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video friendly random augmentation.
This is a moddified copy of EfficientNet RandAugment here:
github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
"""
import inspect
import math
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf2
from tensorflow_addons import image as contrib_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
# NOTE(review): stored as a float (10.); presumably used elsewhere in this
# module to normalize per-op magnitude levels -- confirm at the call sites.
_MAX_LEVEL = 10.
def blend(image1, image2, factor):
  """Blend image1 and image2 using 'factor'.

  A factor of 0.0 yields image1 unchanged and 1.0 yields image2; values
  in between linearly interpolate the pixel values of the two images.
  Factors above 1.0 "extrapolate" the difference between the two images,
  and the result is clipped back into the valid [0, 255] range.

  Args:
    image1: An image Tensor of type uint8.
    image2: An image Tensor of type uint8.
    factor: A floating point value above 0.0.

  Returns:
    A blended image Tensor of type uint8.
  """
  # Trivial endpoints: hand back the corresponding input as a tensor.
  if factor == 0.0:
    return tf1.convert_to_tensor(image1)
  if factor == 1.0:
    return tf1.convert_to_tensor(image2)

  base = tf1.to_float(image1)
  other = tf1.to_float(image2)
  # Do the arithmetic in float to avoid uint8 wrap-around.
  blended = base + factor * (other - base)

  if 0.0 < factor < 1.0:
    # Pure interpolation always stays within [0, 255]; cast directly.
    return tf1.cast(blended, tf1.uint8)

  # Extrapolation can leave the valid range: clip, then cast.
  return tf1.cast(tf1.clip_by_value(blended, 0.0, 255.0), tf1.uint8)
def cutout(image, seed, pad_size, replace=0):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image.

  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `img`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
  chosen uniformly over the whole image.

  Args:
    image: An image Tensor of type uint8.
    seed: the random seed.
    pad_size: Specifies how big the zero mask that will be generated is that
      is applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
    replace: What pixel value to fill in the image in the area that has
      the cutout mask applied to it.

  Returns:
    An image Tensor that is of type uint8.
  """
  image_height = tf1.shape(image)[0]
  image_width = tf1.shape(image)[1]
  # Sample the center location in the image where the zero mask will be applied.
  # NOTE(review): the same stateless seed is used for both draws, so the
  # height and width coordinates are correlated -- confirm this is intended.
  cutout_center_height = tf2.random.stateless_uniform(
      seed=seed, shape=[], minval=0, maxval=image_height,
      dtype=tf1.int32)
  cutout_center_width = tf2.random.stateless_uniform(
      seed=seed, shape=[], minval=0, maxval=image_width,
      dtype=tf1.int32)
  # Padding that surrounds the cutout rectangle on each side; clamping at 0
  # truncates the mask where it would extend past the image border.
  lower_pad = tf1.maximum(0, cutout_center_height - pad_size)
  upper_pad = tf1.maximum(0, image_height - cutout_center_height - pad_size)
  left_pad = tf1.maximum(0, cutout_center_width - pad_size)
  right_pad = tf1.maximum(0, image_width - cutout_center_width - pad_size)
  cutout_shape = [image_height - (lower_pad + upper_pad),
                  image_width - (left_pad + right_pad)]
  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
  # Build a full-size mask: 0 inside the cutout region, 1 everywhere else.
  mask = tf1.pad(
      tf1.zeros(cutout_shape, dtype=image.dtype),
      padding_dims, constant_values=1)
  mask = tf1.expand_dims(mask, -1)
  # Broadcast the mask across the 3 color channels.
  mask = tf1.tile(mask, [1, 1, 3])
  # Fill the masked-out region with `replace`.
  image = tf1.where(
      tf1.equal(mask, 0),
      tf1.ones_like(image, dtype=image.dtype) * replace,
      image)
  return image
def solarize(image, seed, threshold=128):
  """Inverts every pixel at or above `threshold` (PIL-style solarize)."""
  del seed
  inverted = 255 - image
  # Pixels below the threshold pass through unchanged.
  return tf1.where(image < threshold, image, inverted)
def solarize_add(image, seed, addition=0, threshold=128):
  """Adds `addition` to pixels below `threshold`, clipping to [0, 255].

  `addition` is expected to lie in [-128, 128]; the sum is computed in
  int64 to avoid uint8 wrap-around before clipping back to uint8.
  """
  del seed
  shifted = tf1.cast(image, tf1.int64) + addition
  shifted = tf1.cast(tf1.clip_by_value(shifted, 0, 255), tf1.uint8)
  # Only pixels below the threshold receive the addition.
  return tf1.where(image < threshold, shifted, image)
def color(image, seed, factor):
  """Equivalent of PIL Color: blend the image towards its grayscale copy."""
  del seed
  gray_rgb = tf1.image.grayscale_to_rgb(tf1.image.rgb_to_grayscale(image))
  return blend(gray_rgb, image, factor)
def contrast(image, seed, factor):
  """Equivalent of PIL Contrast.

  Blends the image with a constant gray image derived from its grayscale
  histogram; `factor` > 1 increases contrast, `factor` < 1 reduces it.
  """
  del seed
  degenerate = tf1.image.rgb_to_grayscale(image)
  # Cast before calling tf1.histogram.
  degenerate = tf1.cast(degenerate, tf1.int32)
  # Compute the grayscale histogram, then compute the mean pixel value,
  # and create a constant image size of that value. Use that as the
  # blending degenerate target of the original image.
  hist = tf1.histogram_fixed_width(degenerate, [0, 255], nbins=256)
  # NOTE(review): this is the total histogram count (i.e. the pixel count)
  # divided by 256, not an intensity-weighted mean. It matches the upstream
  # EfficientNet implementation -- confirm before "fixing".
  mean = tf1.reduce_sum(tf1.cast(hist, tf1.float32)) / 256.0
  degenerate = tf1.ones_like(degenerate, dtype=tf1.float32) * mean
  degenerate = tf1.clip_by_value(degenerate, 0.0, 255.0)
  degenerate = tf1.image.grayscale_to_rgb(tf1.cast(degenerate, tf1.uint8))
  return blend(degenerate, image, factor)
def brightness(image, seed, factor):
  """Equivalent of PIL Brightness: blend the image towards solid black."""
  del seed
  black = tf1.zeros_like(image)
  return blend(black, image, factor)
def posterize(image, seed, bits):
  """Equivalent of PIL Posterize: keep only the top `bits` bits per channel."""
  del seed
  dropped = 8 - bits
  # Shifting right then left zeroes out the `dropped` least significant bits.
  return tf1.bitwise.left_shift(
      tf1.bitwise.right_shift(image, dropped), dropped)
def rotate(image, seed, degrees, replace):
  """Rotates the image by `degrees` either clockwise or counterclockwise.

  Args:
    image: An image Tensor of type uint8.
    seed: the random seed (unused).
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive the image will be rotated clockwise otherwise it
      will be rotated counterclockwise. Sign randomization is done by the
      caller on `degrees`, not inside this function.
    replace: A one or three value 1D tensor to fill empty pixels caused by
      the rotate operation.

  Returns:
    The rotated version of image.
  """
  del seed
  radians = degrees * (math.pi / 180.0)
  rotated = contrib_image.rotate(wrap(image), radians)
  return unwrap(rotated, replace)
def flip(image, seed, replace):
  """Mirrors the image horizontally, filling exposed pixels with `replace`."""
  del seed
  mirrored = tf2.image.flip_left_right(wrap(image))
  return unwrap(mirrored, replace)
def translate_x(image, seed, pixels, replace):
  """Equivalent of PIL Translate in X dimension."""
  del seed
  # A positive `pixels` shifts the image content to the left.
  shifted = contrib_image.translate(wrap(image), [-pixels, 0])
  return unwrap(shifted, replace)
def translate_y(image, seed, pixels, replace):
  """Equivalent of PIL Translate in Y dimension."""
  del seed
  # A positive `pixels` shifts the image content upwards.
  shifted = contrib_image.translate(wrap(image), [0, -pixels])
  return unwrap(shifted, replace)
def shear_x(image, seed, level, replace):
  """Equivalent of PIL Shearing in X dimension."""
  del seed
  # Shear parallel to the x axis as a flat 8-element projective transform:
  #   [1 level 0]
  #   [0 1     0]
  sheared = contrib_image.transform(
      wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
  return unwrap(sheared, replace)
def shear_y(image, seed, level, replace):
  """Equivalent of PIL Shearing in Y dimension."""
  del seed
  # Shear parallel to the y axis as a flat 8-element projective transform:
  #   [1     0 0]
  #   [level 1 0]
  sheared = contrib_image.transform(
      wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
  return unwrap(sheared, replace)
def autocontrast(image, seed):
  """Implements Autocontrast function from PIL using TF ops.

  Args:
    image: A 3D uint8 tensor.
    seed: the random seed (unused).

  Returns:
    The image after it has had autocontrast applied to it and will be of
    type uint8.
  """
  del seed

  def _stretch(channel):
    """Linearly rescale one 2D channel to span the full [0, 255] range."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
    low = tf1.to_float(tf1.reduce_min(channel))
    high = tf1.to_float(tf1.reduce_max(channel))

    def _rescale():
      scale = 255.0 / (high - low)
      offset = -low * scale
      values = tf1.to_float(channel) * scale + offset
      values = tf1.clip_by_value(values, 0.0, 255.0)
      return tf1.cast(values, tf1.uint8)

    # Constant channels are left untouched (avoids division by zero).
    return tf1.cond(high > low, _rescale, lambda: channel)

  # Assumes RGB for now: scale each channel independently, then restack.
  channels = [_stretch(image[:, :, c]) for c in range(3)]
  return tf1.stack(channels, 2)
def sharpness(image, seed, factor):
  """Implements Sharpness function from PIL using TF ops.

  Blends the original image with a 3x3 box-smoothed copy of itself;
  `factor` > 1 sharpens, `factor` < 1 smooths.

  Args:
    image: A 3D uint8 tensor.
    seed: the random seed (unused).
    factor: Blend factor; see `blend`.

  Returns:
    A sharpness-adjusted uint8 image tensor.
  """
  del seed
  orig_image = image
  image = tf1.cast(image, tf1.float32)
  # Make image 4D for conv operation.
  image = tf1.expand_dims(image, 0)
  # SMOOTH PIL Kernel.
  kernel = tf1.constant(
      [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf1.float32,
      shape=[3, 3, 1, 1]) / 13.
  # Tile across channel dimension.
  kernel = tf1.tile(kernel, [1, 1, 3, 1])
  strides = [1, 1, 1, 1]
  with tf1.device('/cpu:0'):
    # Some augmentation that uses depth-wise conv will cause crashing when
    # training on GPU. See (b/156242594) for details.
    degenerate = tf1.nn.depthwise_conv2d(
        image, kernel, strides, padding='VALID', rate=[1, 1])
  degenerate = tf1.clip_by_value(degenerate, 0.0, 255.0)
  # Drop the batch dimension added above.
  degenerate = tf1.squeeze(tf1.cast(degenerate, tf1.uint8), [0])
  # For the borders of the resulting image, fill in the values of the
  # original image (VALID padding shrank the smoothed copy by 1 pixel
  # on each side).
  mask = tf1.ones_like(degenerate)
  padded_mask = tf1.pad(mask, [[1, 1], [1, 1], [0, 0]])
  padded_degenerate = tf1.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
  result = tf1.where(tf1.equal(padded_mask, 1), padded_degenerate, orig_image)
  # Blend the final result.
  return blend(result, orig_image, factor)
def equalize(image, seed):
  """Implements Equalize function from PIL using TF ops.

  Remaps each channel's intensities through a lookup table so that the
  resulting histogram is approximately uniform.

  Args:
    image: A 3D uint8 tensor (assumed RGB).
    seed: the random seed (unused).

  Returns:
    The equalized uint8 image tensor.
  """
  del seed
  def scale_channel(im, c):
    """Scale the data in the channel to implement equalize."""
    im = tf1.cast(im[:, :, c], tf1.int32)
    # Compute the histogram of the image channel.
    histo = tf1.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the nonzeros.
    nonzero = tf1.where(tf1.not_equal(histo, 0))
    nonzero_histo = tf1.reshape(tf1.gather(histo, nonzero), [-1])
    # Step between output gray levels; the last nonzero bin is excluded,
    # mirroring PIL's C implementation.
    step = (tf1.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
    def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalization by step.
      lut = (tf1.cumsum(histo) + (step // 2)) // step
      # Shift lut, prepending with 0.
      lut = tf1.concat([[0], lut[:-1]], 0)
      # Clip the counts to be in range. This is done
      # in the C code for image.point.
      return tf1.clip_by_value(lut, 0, 255)
    # If step is zero, return the original image. Otherwise, build
    # lut from the full histogram and step and then index from it.
    result = tf1.cond(tf1.equal(step, 0),
                      lambda: im,
                      lambda: tf1.gather(build_lut(histo, step), im))
    return tf1.cast(result, tf1.uint8)
  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image, 0)
  s2 = scale_channel(image, 1)
  s3 = scale_channel(image, 2)
  image = tf1.stack([s1, s2, s3], 2)
  return image
def invert(image, seed):
  """Inverts the image pixels (255 - pixel)."""
  del seed
  return 255 - tf1.convert_to_tensor(image)
def wrap(image):
  """Returns `image` with an extra channel of all 1s appended.

  The extra channel acts as an alpha mask: geometric transforms leave 0s
  wherever they expose empty pixels, which `unwrap` later fills in.
  """
  dims = tf1.shape(image)
  ones = tf1.ones([dims[0], dims[1], 1], image.dtype)
  return tf1.concat([image, ones], 2)
def unwrap(image, replace):
  """Undoes wrap(): fills exposed pixels and drops the helper channel.

  Geometric ops (translate, shear, rotate) leave 0s in the extra channel
  wherever they exposed empty pixels. Those positions are filled with
  `replace` so that intensity-sensitive downstream ops see an 'average'
  value rather than pure black.

  Args:
    image: A 3D Image Tensor with 4 channels.
    replace: A one or three value 1D tensor to fill empty pixels.

  Returns:
    image: A 3D image Tensor with 3 channels.
  """
  shape = tf1.shape(image)
  # Collapse the spatial dimensions so each pixel is a row of 4 channels.
  flat = tf1.reshape(image, [-1, shape[2]])
  # The helper channel added by wrap() plays the role of an alpha mask.
  alpha = flat[:, 3]
  # Extend `replace` with a 1 so it matches the 4-channel layout.
  fill = tf1.concat([replace, tf1.ones([1], image.dtype)], 0)
  # Wherever alpha is 0 the transform exposed an empty pixel; fill it.
  flat = tf1.where(
      tf1.equal(alpha, 0),
      tf1.ones_like(flat, dtype=image.dtype) * fill,
      flat)
  restored = tf1.reshape(flat, shape)
  # Drop the helper channel, keeping only the RGB planes.
  return tf1.slice(restored, [0, 0, 0], [shape[0], shape[1], 3])
# Registry mapping policy op names to their implementations. Each function
# takes (image, seed, *op_args) and returns a uint8 image tensor; `replace`
# and `prob` args are appended/prepended by _parse_policy_info as needed.
NAME_TO_FUNC = {
    'AutoContrast': autocontrast,
    'Equalize': equalize,
    'Flip': flip,
    'Invert': invert,
    'Rotate': rotate,
    'Posterize': posterize,
    'Solarize': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'Contrast': contrast,
    'Brightness': brightness,
    'Sharpness': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x,
    'TranslateY': translate_y,
    'Cutout': cutout,
}
def _randomly_negate_tensor(tensor, seed):
  """With 50% prob turn the tensor negative."""
  coin = tf2.random.stateless_uniform([], seed=seed)
  # Rounding the uniform draw gives a fair boolean coin flip.
  keep_sign = tf1.cast(tf1.floor(coin + 0.5), tf1.bool)
  return tf1.cond(keep_sign, lambda: tensor, lambda: -tensor)
def _rotate_level_to_arg(level, seed):
  """Maps level to a rotation angle in [-30, 30] degrees."""
  degrees = (level / _MAX_LEVEL) * 30.
  # Flip the sign with 50% probability.
  degrees = _randomly_negate_tensor(degrees, seed)
  return (degrees,)
def _shrink_level_to_arg(level):
  """Converts level to ratio by which we shrink the image content."""
  if level == 0:
    # Level zero means the image is not shrunk at all.
    return (1.0,)
  # Maps level in (0, _MAX_LEVEL] onto (0.9, 2.9]; maximum ratio is 2.9.
  ratio = 2. / (_MAX_LEVEL / level) + 0.9
  return (ratio,)
def _enhance_level_to_arg(level):
  """Maps level onto an enhancement factor in [0.1, 1.9]."""
  factor = (level / _MAX_LEVEL) * 1.8 + 0.1
  return (factor,)
def _shear_level_to_arg(level, seed):
  """Maps level to a shear amount in [-0.3, 0.3]."""
  shear = (level / _MAX_LEVEL) * 0.3
  # Flip the sign with 50% probability.
  shear = _randomly_negate_tensor(shear, seed)
  return (shear,)
def _translate_level_to_arg(level, seed, translate_const):
  """Maps level to a translation of up to `translate_const` pixels."""
  pixels = (level / _MAX_LEVEL) * float(translate_const)
  # Flip the sign with 50% probability.
  pixels = _randomly_negate_tensor(pixels, seed)
  return (pixels,)
def level_to_arg(hparams, seed):
  """Maps each op name to a fn converting `level` into that op's args.

  Args:
    hparams: Dict with 'cutout_const' and 'translate_const' entries.
    seed: Random seed closed over by the converters that randomize sign.

  Returns:
    Dict from op name to a callable taking `level` and returning the
    op-specific positional args (excluding image/seed/replace).
  """
  return {
      # Parameterless ops ignore the level entirely.
      'AutoContrast': lambda level: (),
      'Equalize': lambda level: (),
      'Flip': lambda level: (),
      'Invert': lambda level: (),
      'Rotate': lambda level: _rotate_level_to_arg(level, seed),
      'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
      'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
      'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
      'Color': _enhance_level_to_arg,
      'Contrast': _enhance_level_to_arg,
      'Brightness': _enhance_level_to_arg,
      'Sharpness': _enhance_level_to_arg,
      'ShearX': lambda level: _shear_level_to_arg(level, seed),
      'ShearY': lambda level: _shear_level_to_arg(level, seed),
      # pylint:disable=g-long-lambda
      'Cutout': lambda level: (int((level/_MAX_LEVEL) *
                                   hparams['cutout_const']),),
      'TranslateX': lambda level: _translate_level_to_arg(
          level, seed, hparams['translate_const']),
      'TranslateY': lambda level: _translate_level_to_arg(
          level, seed, hparams['translate_const']),
      # pylint:enable=g-long-lambda
  }
def _parse_policy_info(
    name, seed, prob, level, replace_value, augmentation_hparams):
  """Return the function that corresponds to `name` and update `level` param.

  Args:
    name: Key into NAME_TO_FUNC identifying the augmentation op.
    seed: Random seed forwarded to the level-to-arg converters.
    prob: Probability of applying the op; prepended to `args` only when the
      op's signature declares a `prob` parameter.
    level: Scalar strength in [0, _MAX_LEVEL] translated into op arguments.
    replace_value: Fill value appended for ops that take a `replace` arg.
    augmentation_hparams: Dict with 'cutout_const' and 'translate_const'.

  Returns:
    A (func, prob, args) tuple describing the selected augmentation.
  """
  func = NAME_TO_FUNC[name]
  args = level_to_arg(augmentation_hparams, seed)[name](level)
  # inspect.getargspec was deprecated and then removed in Python 3.11;
  # getfullargspec's `.args` field is identical for these functions.
  func_args = inspect.getfullargspec(func).args
  # Check to see if prob is passed into function. This is used for operations
  # where we alter bboxes independently.
  if 'prob' in func_args:
    args = tuple([prob] + list(args))
  # Add in replace arg if it is required for the function that is being called.
  if 'replace' in func_args:
    # Make sure replace is the final argument.
    assert 'replace' == func_args[-1]
    args = tuple(list(args) + [replace_value])
  return (func, prob, args)
def randaugment(image, num_layers, magnitude, seeds):
  """Applies the RandAugment policy to `image`.

  RandAugment is from the paper https://arxiv.org/abs/1909.13719,

  Args:
    image: `Tensor` of shape [height, width, 3] representing an image.
    num_layers: Integer, the number of augmentation transformations to apply
      sequentially to an image. Represented as (N) in the paper. Usually best
      values will be in the range [1, 3].
    magnitude: Integer, shared magnitude across all augmentation operations.
      Represented as (M) in the paper. Usually best values are in the range
      [5, 30].
    seeds: The random seeds.

  Returns:
    The augmented version of `image`.
  """
  # Gray fill value for pixels exposed by geometric transforms.
  replace_value = [128] * 3
  tf1.logging.info('Using RandAug.')
  augmentation_hparams = {
      'cutout_const': 10,
      'translate_const' :10
  }
  # Commented-out ops (Invert, Solarize, Cutout, SolarizeAdd) are excluded
  # from the sampling pool.
  available_ops = [
      'AutoContrast',
      'Equalize',
      'Flip',
      # 'Invert',
      'Rotate',
      'Posterize',
      # 'Solarize',
      'Color',
      'Contrast',
      'Brightness',
      'Sharpness',
      'ShearX',
      'ShearY',
      'TranslateX',
      'TranslateY',
      # 'Cutout',
      # 'SolarizeAdd'
  ]
  for layer_num in range(num_layers):
    # NOTE(review): seeds[0] is reused for every layer, so each layer draws
    # the same op index (and seeds[1]/seeds[3] likewise repeat). Presumably
    # this determinism is intended for temporally consistent video
    # augmentation -- confirm with the caller.
    op_to_select = tf2.random.stateless_uniform(
        [], seed=seeds[0], maxval=len(available_ops), dtype=tf1.int32)
    random_magnitude = float(magnitude)
    with tf1.name_scope('randaug_layer_{}'.format(layer_num)):
      # Build a tf.cond chain over all ops; only the sampled one transforms
      # the image, every other branch passes it through unchanged.
      for (i, op_name) in enumerate(available_ops):
        # NOTE(review): `prob` appears unused in practice -- none of the
        # available ops declare a `prob` parameter, and the prob returned by
        # _parse_policy_info is discarded below.
        prob = tf2.random.stateless_uniform(
            [], seed=seeds[1], minval=0.2, maxval=0.8, dtype=tf1.float32)
        func, _, args = _parse_policy_info(op_name, seeds[2],
                                           prob, random_magnitude,
                                           replace_value, augmentation_hparams)
        # func/args are bound via default arguments to avoid late-binding;
        # `image` is deliberately read from the enclosing scope, which is
        # safe because tf1.cond traces both branches during this same loop
        # iteration.
        image = tf1.cond(
            tf1.equal(i, op_to_select),
            # pylint:disable=g-long-lambda
            lambda selected_func=func, selected_args=args: selected_func(
                image, seeds[3], *selected_args),
            # pylint:enable=g-long-lambda
            lambda: image)
  return image
# NOTE(review): removed trailing non-Python text ("Subsets and Splits ..."
# dataset-viewer page residue) that made the module a syntax error.